Provide string description of definition, visibility and resolution in LTO plug-in.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-aarch64-low.c
CommitLineData
176eb98c
MS
1/* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
42a4f53d 4 Copyright (C) 2009-2019 Free Software Foundation, Inc.
176eb98c
MS
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "server.h"
23#include "linux-low.h"
db3cb7cb 24#include "nat/aarch64-linux.h"
554717a3 25#include "nat/aarch64-linux-hw-point.h"
bb903df0 26#include "arch/aarch64-insn.h"
3b53ae99 27#include "linux-aarch32-low.h"
176eb98c 28#include "elf/common.h"
afbe19f8
PL
29#include "ax.h"
30#include "tracepoint.h"
176eb98c
MS
31
32#include <signal.h>
33#include <sys/user.h>
5826e159 34#include "nat/gdb_ptrace.h"
e9dae05e 35#include <asm/ptrace.h>
bb903df0
PL
36#include <inttypes.h>
37#include <endian.h>
38#include <sys/uio.h>
176eb98c
MS
39
40#include "gdb_proc_service.h"
cc628f3d 41#include "arch/aarch64.h"
d6d7ce56 42#include "linux-aarch64-tdesc.h"
fefa175e 43#include "nat/aarch64-sve-linux-ptrace.h"
02895270 44#include "tdesc.h"
176eb98c 45
176eb98c
MS
46#ifdef HAVE_SYS_REG_H
47#include <sys/reg.h>
48#endif
49
176eb98c
MS
50/* Per-process arch-specific data we want to keep. */
51
52struct arch_process_info
53{
54 /* Hardware breakpoint/watchpoint data.
55 The reason for them to be per-process rather than per-thread is
56 due to the lack of information in the gdbserver environment;
57 gdbserver is not told that whether a requested hardware
58 breakpoint/watchpoint is thread specific or not, so it has to set
59 each hw bp/wp for every thread in the current process. The
60 higher level bp/wp management in gdb will resume a thread if a hw
61 bp/wp trap is not expected for it. Since the hw bp/wp setting is
62 same for each thread, it is reasonable for the data to live here.
63 */
64 struct aarch64_debug_reg_state debug_reg_state;
65};
66
3b53ae99
YQ
67/* Return true if the size of register 0 is 8 byte. */
68
69static int
70is_64bit_tdesc (void)
71{
72 struct regcache *regcache = get_thread_regcache (current_thread, 0);
73
74 return register_size (regcache->tdesc, 0) == 8;
75}
76
02895270
AH
77/* Return true if the regcache contains the number of SVE registers. */
78
79static bool
80is_sve_tdesc (void)
81{
82 struct regcache *regcache = get_thread_regcache (current_thread, 0);
83
84 return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
85}
86
176eb98c
MS
/* Regset fill function: copy the general registers (X0..X30, SP, PC
   and CPSR) from REGCACHE into BUF, which holds a struct user_pt_regs
   in the NT_PRSTATUS layout.  The CPSR value is written into the
   regset's `pstate' field.  */

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
99
/* Regset store function: the inverse of aarch64_fill_gregset.  Copy
   the general registers from BUF (a struct user_pt_regs in NT_PRSTATUS
   layout) into REGCACHE.  */

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
112
/* Regset fill function: copy the FP/SIMD registers (V0..V31, FPSR,
   FPCR) from REGCACHE into BUF, which holds a struct user_fpsimd_state
   in the NT_FPREGSET layout.  */

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
124
/* Regset store function: the inverse of aarch64_fill_fpregset.  Copy
   the FP/SIMD registers from BUF (a struct user_fpsimd_state) into
   REGCACHE.  */

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
137
1ef53e6b
AH
/* Store the pauth registers to regcache.  BUF holds two 64-bit mask
   values: the data mask followed by the code mask (the NT_ARM_PAC_MASK
   regset layout).  If the target description has no "pauth_dmask"
   register, pointer authentication is not present and nothing is
   stored.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  /* pauth_regset[0] is the data mask, [1] the code mask.  */
  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
154
176eb98c
MS
155/* Enable miscellaneous debugging output. The name is historical - it
156 was originally used to debug LinuxThreads support. */
157extern int debug_threads;
158
421530db
PL
159/* Implementation of linux_target_ops method "get_pc". */
160
176eb98c
MS
161static CORE_ADDR
162aarch64_get_pc (struct regcache *regcache)
163{
8a7e4587 164 if (register_size (regcache->tdesc, 0) == 8)
a5652c21 165 return linux_get_pc_64bit (regcache);
8a7e4587 166 else
a5652c21 167 return linux_get_pc_32bit (regcache);
176eb98c
MS
168}
169
421530db
PL
170/* Implementation of linux_target_ops method "set_pc". */
171
176eb98c
MS
172static void
173aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
174{
8a7e4587 175 if (register_size (regcache->tdesc, 0) == 8)
a5652c21 176 linux_set_pc_64bit (regcache, pc);
8a7e4587 177 else
a5652c21 178 linux_set_pc_32bit (regcache, pc);
176eb98c
MS
179}
180
176eb98c
MS
/* All AArch64 instructions, including BRK, are 4 bytes.  */
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  These bytes are the little-endian
   encoding of 0xd4200000.  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
176eb98c 187
421530db
PL
/* Implementation of linux_target_ops method "breakpoint_at".

   Return non-zero if the instruction at WHERE is our software
   breakpoint instruction.  For a 32-bit inferior, defer to the
   AArch32 implementation.  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      /* NOTE(review): the result of read_memory is ignored; on a
	 failed read INSN is compared uninitialized.  Upstream does the
	 same — confirm before changing.  */
      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}
207
176eb98c
MS
/* Reset STATE so that no hardware breakpoints or watchpoints are
   recorded: zero the address, control and reference-count mirrors for
   every breakpoint and watchpoint register slot.  */

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
227
176eb98c
MS
228/* Return the pointer to the debug register state structure in the
229 current process' arch-specific data area. */
230
db3cb7cb 231struct aarch64_debug_reg_state *
88e2cf7e 232aarch64_get_debug_reg_state (pid_t pid)
176eb98c 233{
88e2cf7e 234 struct process_info *proc = find_process_pid (pid);
176eb98c 235
fe978cb0 236 return &proc->priv->arch_private->debug_reg_state;
176eb98c
MS
237}
238
421530db
PL
239/* Implementation of linux_target_ops method "supports_z_point_type". */
240
4ff0d3d8
PA
241static int
242aarch64_supports_z_point_type (char z_type)
243{
244 switch (z_type)
245 {
96c97461 246 case Z_PACKET_SW_BP:
4ff0d3d8
PA
247 case Z_PACKET_HW_BP:
248 case Z_PACKET_WRITE_WP:
249 case Z_PACKET_READ_WP:
250 case Z_PACKET_ACCESS_WP:
251 return 1;
252 default:
4ff0d3d8
PA
253 return 0;
254 }
255}
256
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp in
   the per-process debug register mirror; the actual insertion will
   happen when threads are resumed.  Returns 0 on success, -1 on
   failure (e.g. region not watchable).  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      /* Watchpoint: only record it if the kernel can watch the
	 region.  */
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
305
/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp in the
   per-process debug register mirror; the actual removal will be done
   when threads are resumed.  Returns 0 on success, non-zero on
   failure.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
351
/* Implementation of linux_target_ops method "stopped_data_address".

   If the current thread stopped for a hardware watchpoint hit, return
   the (original, as-requested) address of the triggering watchpoint;
   otherwise return 0.  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address; scan every
     watchpoint register slot, highest first.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
	     |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
413
/* Implementation of linux_target_ops method "stopped_by_watchpoint".

   Non-zero iff aarch64_stopped_data_address found a matching
   watchpoint for the current stop.  */

static int
aarch64_stopped_by_watchpoint (void)
{
  return aarch64_stopped_data_address () != 0;
}
424
/* Fetch the thread-local storage pointer for libthread_db.  Delegates
   to the shared native implementation, telling it whether the
   inferior is 64-bit so the TLS value is read with the right width.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
434
ade90bde
YQ
/* Implementation of linux_target_ops method "siginfo_fixup".

   Convert between the native (64-bit) siginfo layout and the compat
   (32-bit) layout used by a 32-bit inferior.  DIRECTION 0 converts
   NATIVE into the compat object at INF; otherwise INF (compat) is
   converted into NATIVE.  Return 1 if a conversion was done, 0 if the
   inferior is 64-bit and no fixup is needed.  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
455
/* Implementation of linux_target_ops method "new_process".

   Allocate (zero-initialized) per-process arch data and reset its
   debug register mirror.  Ownership passes to the caller; it is freed
   by aarch64_linux_delete_process.  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}
467
04ec7890
SM
/* Implementation of linux_target_ops method "delete_process".

   Release the per-process arch data allocated by
   aarch64_linux_new_process.  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
475
421530db
PL
/* Implementation of linux_target_ops method "linux_new_fork".

   Copy the parent's debug register mirror into the child.  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  /* Structure copy of the whole mirror.  */
  *child->priv->arch_private = *parent->priv->arch_private;
}
504
ee4fbcfa
AH
505/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
506#define AARCH64_HWCAP_PACA (1 << 30)
507
508/* Fetch the AT_HWCAP entry from the auxv vector. */
509
510static bool
511aarch64_get_hwcap (unsigned long *valp)
512{
513 unsigned char *data = (unsigned char *) alloca (16);
514 int offset = 0;
515
516 while ((*the_target->read_auxv) (offset, data, 16) == 16)
517 {
518 unsigned long *data_p = (unsigned long *)data;
519 if (data_p[0] == AT_HWCAP)
520 {
521 *valp = data_p[1];
522 return true;
523 }
524
525 offset += 16;
526 }
527
528 *valp = 0;
529 return false;
530}
531
/* Implementation of linux_target_ops method "arch_setup".

   Pick the target description for the current process: for a 64-bit
   ELF inferior, query the SVE vector quantum and the PACA hwcap bit
   and read the matching AArch64 description; otherwise fall back to
   the 32-bit ARM-with-NEON description.  Also probes how many debug
   registers the hardware provides.  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      /* VQ is the SVE vector length in quadwords (0 if no SVE).  */
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = 0;
      bool pauth_p = aarch64_get_hwcap (&hwcap) && (hwcap & AARCH64_HWCAP_PACA);

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = tdesc_arm_with_neon;

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
558
02895270
AH
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf, giving it the
   regset-store signature.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}
566
/* Wrapper for aarch64_sve_regs_copy_from_reg_buf, giving it the
   regset-fill signature.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
574
/* Regsets for a plain (non-SVE) AArch64 inferior: general registers,
   FP/SIMD registers, and the optional pointer-authentication masks.  */

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  /* Read-only (no fill function): the PAC masks are supplied by the
     kernel and never written back.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Regsets for an SVE-capable inferior: the FP/SIMD regset is replaced
   by the NT_ARM_SVE regset, sized for the maximum vector quantum.  */

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
632
421530db
PL
633/* Implementation of linux_target_ops method "regs_info". */
634
3aee8918
PA
635static const struct regs_info *
636aarch64_regs_info (void)
637{
02895270 638 if (!is_64bit_tdesc ())
3b53ae99 639 return &regs_info_aarch32;
02895270
AH
640
641 if (is_sve_tdesc ())
642 return &regs_info_aarch64_sve;
643
644 return &regs_info_aarch64;
3aee8918
PA
645}
646
7671bf47
PL
647/* Implementation of linux_target_ops method "supports_tracepoints". */
648
649static int
650aarch64_supports_tracepoints (void)
651{
524b57e6
YQ
652 if (current_thread == NULL)
653 return 1;
654 else
655 {
656 /* We don't support tracepoints on aarch32 now. */
657 return is_64bit_tdesc ();
658 }
7671bf47
PL
659}
660
bb903df0
PL
/* Implementation of linux_target_ops method "get_thread_area".

   Read thread LWPID's TLS base (TPIDR_EL0) via the NT_ARM_TLS regset
   into *ADDRP.  Return 0 on success, -1 on ptrace failure.  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
679
061fc021
YQ
/* Implementation of linux_target_ops method "get_syscall_trapinfo".

   Store the syscall number for the current syscall stop in *SYSNO.
   The Linux syscall number lives in x8 for AArch64 and in r7 for
   AArch32.  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      /* x8 is 64 bits wide; read into a long and narrow.  */
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
697
afbe19f8
PL
/* List of condition codes that we need.  The values are the AArch64
   condition-field encodings used in conditional branches.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};
710
6c1c9a8b
YQ
/* Kinds of instruction operand supported by the emitters below.  */

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand; selects which union member is valid.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
732
/* List of registers that we are currently using, we can add more here as
   we need to use them.  Each is { encoding number, is64 }.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  SP and XZR share encoding 31; which one
   an instruction means depends on the instruction itself.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
755
756/* Dynamically allocate a new register. If we know the register
757 statically, we should make it a global as above instead of using this
758 helper function. */
759
760static struct aarch64_register
761aarch64_register (unsigned num, int is64)
762{
763 return (struct aarch64_register) { num, is64 };
764}
765
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}
782
/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
799
bb903df0
PL
800/* Helper function to create an offset memory operand.
801
802 For example:
803 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
804
805static struct aarch64_memory_operand
806offset_memory_operand (int32_t offset)
807{
808 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
809}
810
811/* Helper function to create a pre-index memory operand.
812
813 For example:
814 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
815
816static struct aarch64_memory_operand
817preindex_memory_operand (int32_t index)
818{
819 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
820}
821
afbe19f8
PL
822/* Helper function to create a post-index memory operand.
823
824 For example:
825 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
826
827static struct aarch64_memory_operand
828postindex_memory_operand (int32_t index)
829{
830 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
831}
832
bb903df0
PL
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.

   Each value packs the (op0, op1, crn, crm, op2) fields of the system
   register name into the bit positions used by MRS/MSR encodings.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
850
bb903df0
PL
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  Returns the number of bytes
   written (one 4-byte instruction).  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
862
/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  Returns the number of bytes
   written.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
874
/* Helper emitting an STP/LDP-style pair instruction (OPCODE) for
   registers RT/RT2 against base register RN with addressing mode
   OPERAND (offset, pre-index or post-index).  The 7-bit signed
   immediate is the byte offset scaled down by 8 (hence the
   -512 .. 504 range documented on the callers).  Returns the number
   of bytes written, or 0 for an unsupported addressing mode.  */

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  /* opc selects the 64-bit (2) or 32-bit (0) variant.  */
  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
920
afbe19f8
PL
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
939
/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
958
bb903df0
PL
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q register numbers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4 — Q-register pairs scale the
   immediate by 16).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
980
/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q register numbers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4 — Q-register pairs scale the
   immediate by 16).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
1002
afbe19f8
PL
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).
   NOTE(review): the range above looks copied from emit_str; for a
   halfword access the unsigned offset is scaled by 2, not 8 — confirm
   against aarch64_emit_load_store before relying on it.  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
1021
1022/* Write a LDRB instruction into *BUF.
1023
1024 LDRB wt, [xn, #offset]
1025 LDRB wt, [xn, #index]!
1026 LDRB wt, [xn], #index
1027
1028 RT is the register to store.
1029 RN is the base address register.
1030 OFFSET is the immediate to add to the base address. It is limited to
1031 0 .. 32760 range (12 bits << 3). */
1032
1033static int
1034emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1035 struct aarch64_register rn,
1036 struct aarch64_memory_operand operand)
1037{
1c2e1515 1038 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
afbe19f8
PL
1039}
1040
bb903df0 1041
bb903df0
PL
1042
1043/* Write a STR instruction into *BUF.
1044
1045 STR rt, [rn, #offset]
1046 STR rt, [rn, #index]!
afbe19f8 1047 STR rt, [rn], #index
bb903df0
PL
1048
1049 RT is the register to store.
1050 RN is the base address register.
1051 OFFSET is the immediate to add to the base address. It is limited to
1052 0 .. 32760 range (12 bits << 3). */
1053
1054static int
1055emit_str (uint32_t *buf, struct aarch64_register rt,
1056 struct aarch64_register rn,
1057 struct aarch64_memory_operand operand)
1058{
1c2e1515 1059 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
bb903df0
PL
1060}
1061
1062/* Helper function emitting an exclusive load or store instruction. */
1063
1064static int
1065emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1066 enum aarch64_opcodes opcode,
1067 struct aarch64_register rs,
1068 struct aarch64_register rt,
1069 struct aarch64_register rt2,
1070 struct aarch64_register rn)
1071{
e1c587c3
YQ
1072 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1073 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1074 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
bb903df0
PL
1075}
1076
1077/* Write a LAXR instruction into *BUF.
1078
1079 LDAXR rt, [xn]
1080
1081 RT is the destination register.
1082 RN is the base address register. */
1083
1084static int
1085emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1086 struct aarch64_register rn)
1087{
1088 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1089 xzr, rn);
1090}
1091
1092/* Write a STXR instruction into *BUF.
1093
1094 STXR ws, rt, [xn]
1095
1096 RS is the result register, it indicates if the store succeeded or not.
1097 RT is the destination register.
1098 RN is the base address register. */
1099
1100static int
1101emit_stxr (uint32_t *buf, struct aarch64_register rs,
1102 struct aarch64_register rt, struct aarch64_register rn)
1103{
1104 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1105 xzr, rn);
1106}
1107
1108/* Write a STLR instruction into *BUF.
1109
1110 STLR rt, [xn]
1111
1112 RT is the register to store.
1113 RN is the base address register. */
1114
1115static int
1116emit_stlr (uint32_t *buf, struct aarch64_register rt,
1117 struct aarch64_register rn)
1118{
1119 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1120 xzr, rn);
1121}
1122
1123/* Helper function for data processing instructions with register sources. */
1124
1125static int
231c0592 1126emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
bb903df0
PL
1127 struct aarch64_register rd,
1128 struct aarch64_register rn,
1129 struct aarch64_register rm)
1130{
1131 uint32_t size = ENCODE (rd.is64, 1, 31);
1132
e1c587c3
YQ
1133 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1134 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
bb903df0
PL
1135}
1136
1137/* Helper function for data processing instructions taking either a register
1138 or an immediate. */
1139
1140static int
1141emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1142 struct aarch64_register rd,
1143 struct aarch64_register rn,
1144 struct aarch64_operand operand)
1145{
1146 uint32_t size = ENCODE (rd.is64, 1, 31);
1147 /* The opcode is different for register and immediate source operands. */
1148 uint32_t operand_opcode;
1149
1150 if (operand.type == OPERAND_IMMEDIATE)
1151 {
1152 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1153 operand_opcode = ENCODE (8, 4, 25);
1154
e1c587c3
YQ
1155 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1156 | ENCODE (operand.imm, 12, 10)
1157 | ENCODE (rn.num, 5, 5)
1158 | ENCODE (rd.num, 5, 0));
bb903df0
PL
1159 }
1160 else
1161 {
1162 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1163 operand_opcode = ENCODE (5, 4, 25);
1164
1165 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1166 rn, operand.reg);
1167 }
1168}
1169
1170/* Write an ADD instruction into *BUF.
1171
1172 ADD rd, rn, #imm
1173 ADD rd, rn, rm
1174
1175 This function handles both an immediate and register add.
1176
1177 RD is the destination register.
1178 RN is the input register.
1179 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1180 OPERAND_REGISTER. */
1181
1182static int
1183emit_add (uint32_t *buf, struct aarch64_register rd,
1184 struct aarch64_register rn, struct aarch64_operand operand)
1185{
1186 return emit_data_processing (buf, ADD, rd, rn, operand);
1187}
1188
1189/* Write a SUB instruction into *BUF.
1190
1191 SUB rd, rn, #imm
1192 SUB rd, rn, rm
1193
1194 This function handles both an immediate and register sub.
1195
1196 RD is the destination register.
1197 RN is the input register.
1198 IMM is the immediate to substract to RN. */
1199
1200static int
1201emit_sub (uint32_t *buf, struct aarch64_register rd,
1202 struct aarch64_register rn, struct aarch64_operand operand)
1203{
1204 return emit_data_processing (buf, SUB, rd, rn, operand);
1205}
1206
1207/* Write a MOV instruction into *BUF.
1208
1209 MOV rd, #imm
1210 MOV rd, rm
1211
1212 This function handles both a wide immediate move and a register move,
1213 with the condition that the source register is not xzr. xzr and the
1214 stack pointer share the same encoding and this function only supports
1215 the stack pointer.
1216
1217 RD is the destination register.
1218 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1219 OPERAND_REGISTER. */
1220
1221static int
1222emit_mov (uint32_t *buf, struct aarch64_register rd,
1223 struct aarch64_operand operand)
1224{
1225 if (operand.type == OPERAND_IMMEDIATE)
1226 {
1227 uint32_t size = ENCODE (rd.is64, 1, 31);
1228 /* Do not shift the immediate. */
1229 uint32_t shift = ENCODE (0, 2, 21);
1230
e1c587c3
YQ
1231 return aarch64_emit_insn (buf, MOV | size | shift
1232 | ENCODE (operand.imm, 16, 5)
1233 | ENCODE (rd.num, 5, 0));
bb903df0
PL
1234 }
1235 else
1236 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1237}
1238
1239/* Write a MOVK instruction into *BUF.
1240
1241 MOVK rd, #imm, lsl #shift
1242
1243 RD is the destination register.
1244 IMM is the immediate.
1245 SHIFT is the logical shift left to apply to IMM. */
1246
1247static int
7781c06f
YQ
1248emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1249 unsigned shift)
bb903df0
PL
1250{
1251 uint32_t size = ENCODE (rd.is64, 1, 31);
1252
e1c587c3
YQ
1253 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1254 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
bb903df0
PL
1255}
1256
1257/* Write instructions into *BUF in order to move ADDR into a register.
1258 ADDR can be a 64-bit value.
1259
1260 This function will emit a series of MOV and MOVK instructions, such as:
1261
1262 MOV xd, #(addr)
1263 MOVK xd, #(addr >> 16), lsl #16
1264 MOVK xd, #(addr >> 32), lsl #32
1265 MOVK xd, #(addr >> 48), lsl #48 */
1266
1267static int
1268emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1269{
1270 uint32_t *p = buf;
1271
1272 /* The MOV (wide immediate) instruction clears to top bits of the
1273 register. */
1274 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1275
1276 if ((addr >> 16) != 0)
1277 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1278 else
1279 return p - buf;
1280
1281 if ((addr >> 32) != 0)
1282 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1283 else
1284 return p - buf;
1285
1286 if ((addr >> 48) != 0)
1287 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1288
1289 return p - buf;
1290}
1291
afbe19f8
PL
1292/* Write a SUBS instruction into *BUF.
1293
1294 SUBS rd, rn, rm
1295
1296 This instruction update the condition flags.
1297
1298 RD is the destination register.
1299 RN and RM are the source registers. */
1300
1301static int
1302emit_subs (uint32_t *buf, struct aarch64_register rd,
1303 struct aarch64_register rn, struct aarch64_operand operand)
1304{
1305 return emit_data_processing (buf, SUBS, rd, rn, operand);
1306}
1307
1308/* Write a CMP instruction into *BUF.
1309
1310 CMP rn, rm
1311
1312 This instruction is an alias of SUBS xzr, rn, rm.
1313
1314 RN and RM are the registers to compare. */
1315
1316static int
1317emit_cmp (uint32_t *buf, struct aarch64_register rn,
1318 struct aarch64_operand operand)
1319{
1320 return emit_subs (buf, xzr, rn, operand);
1321}
1322
1323/* Write a AND instruction into *BUF.
1324
1325 AND rd, rn, rm
1326
1327 RD is the destination register.
1328 RN and RM are the source registers. */
1329
1330static int
1331emit_and (uint32_t *buf, struct aarch64_register rd,
1332 struct aarch64_register rn, struct aarch64_register rm)
1333{
1334 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1335}
1336
1337/* Write a ORR instruction into *BUF.
1338
1339 ORR rd, rn, rm
1340
1341 RD is the destination register.
1342 RN and RM are the source registers. */
1343
1344static int
1345emit_orr (uint32_t *buf, struct aarch64_register rd,
1346 struct aarch64_register rn, struct aarch64_register rm)
1347{
1348 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1349}
1350
1351/* Write a ORN instruction into *BUF.
1352
1353 ORN rd, rn, rm
1354
1355 RD is the destination register.
1356 RN and RM are the source registers. */
1357
1358static int
1359emit_orn (uint32_t *buf, struct aarch64_register rd,
1360 struct aarch64_register rn, struct aarch64_register rm)
1361{
1362 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1363}
1364
1365/* Write a EOR instruction into *BUF.
1366
1367 EOR rd, rn, rm
1368
1369 RD is the destination register.
1370 RN and RM are the source registers. */
1371
1372static int
1373emit_eor (uint32_t *buf, struct aarch64_register rd,
1374 struct aarch64_register rn, struct aarch64_register rm)
1375{
1376 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1377}
1378
1379/* Write a MVN instruction into *BUF.
1380
1381 MVN rd, rm
1382
1383 This is an alias for ORN rd, xzr, rm.
1384
1385 RD is the destination register.
1386 RM is the source register. */
1387
1388static int
1389emit_mvn (uint32_t *buf, struct aarch64_register rd,
1390 struct aarch64_register rm)
1391{
1392 return emit_orn (buf, rd, xzr, rm);
1393}
1394
1395/* Write a LSLV instruction into *BUF.
1396
1397 LSLV rd, rn, rm
1398
1399 RD is the destination register.
1400 RN and RM are the source registers. */
1401
1402static int
1403emit_lslv (uint32_t *buf, struct aarch64_register rd,
1404 struct aarch64_register rn, struct aarch64_register rm)
1405{
1406 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1407}
1408
1409/* Write a LSRV instruction into *BUF.
1410
1411 LSRV rd, rn, rm
1412
1413 RD is the destination register.
1414 RN and RM are the source registers. */
1415
1416static int
1417emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1418 struct aarch64_register rn, struct aarch64_register rm)
1419{
1420 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1421}
1422
1423/* Write a ASRV instruction into *BUF.
1424
1425 ASRV rd, rn, rm
1426
1427 RD is the destination register.
1428 RN and RM are the source registers. */
1429
1430static int
1431emit_asrv (uint32_t *buf, struct aarch64_register rd,
1432 struct aarch64_register rn, struct aarch64_register rm)
1433{
1434 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1435}
1436
1437/* Write a MUL instruction into *BUF.
1438
1439 MUL rd, rn, rm
1440
1441 RD is the destination register.
1442 RN and RM are the source registers. */
1443
1444static int
1445emit_mul (uint32_t *buf, struct aarch64_register rd,
1446 struct aarch64_register rn, struct aarch64_register rm)
1447{
1448 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1449}
1450
bb903df0
PL
1451/* Write a MRS instruction into *BUF. The register size is 64-bit.
1452
1453 MRS xt, system_reg
1454
1455 RT is the destination register.
1456 SYSTEM_REG is special purpose register to read. */
1457
1458static int
1459emit_mrs (uint32_t *buf, struct aarch64_register rt,
1460 enum aarch64_system_control_registers system_reg)
1461{
e1c587c3
YQ
1462 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1463 | ENCODE (rt.num, 5, 0));
bb903df0
PL
1464}
1465
1466/* Write a MSR instruction into *BUF. The register size is 64-bit.
1467
1468 MSR system_reg, xt
1469
1470 SYSTEM_REG is special purpose register to write.
1471 RT is the input register. */
1472
1473static int
1474emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1475 struct aarch64_register rt)
1476{
e1c587c3
YQ
1477 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1478 | ENCODE (rt.num, 5, 0));
bb903df0
PL
1479}
1480
1481/* Write a SEVL instruction into *BUF.
1482
1483 This is a hint instruction telling the hardware to trigger an event. */
1484
1485static int
1486emit_sevl (uint32_t *buf)
1487{
e1c587c3 1488 return aarch64_emit_insn (buf, SEVL);
bb903df0
PL
1489}
1490
1491/* Write a WFE instruction into *BUF.
1492
1493 This is a hint instruction telling the hardware to wait for an event. */
1494
1495static int
1496emit_wfe (uint32_t *buf)
1497{
e1c587c3 1498 return aarch64_emit_insn (buf, WFE);
bb903df0
PL
1499}
1500
afbe19f8
PL
1501/* Write a SBFM instruction into *BUF.
1502
1503 SBFM rd, rn, #immr, #imms
1504
1505 This instruction moves the bits from #immr to #imms into the
1506 destination, sign extending the result.
1507
1508 RD is the destination register.
1509 RN is the source register.
1510 IMMR is the bit number to start at (least significant bit).
1511 IMMS is the bit number to stop at (most significant bit). */
1512
1513static int
1514emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1515 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1516{
1517 uint32_t size = ENCODE (rd.is64, 1, 31);
1518 uint32_t n = ENCODE (rd.is64, 1, 22);
1519
e1c587c3
YQ
1520 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1521 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1522 | ENCODE (rd.num, 5, 0));
afbe19f8
PL
1523}
1524
1525/* Write a SBFX instruction into *BUF.
1526
1527 SBFX rd, rn, #lsb, #width
1528
1529 This instruction moves #width bits from #lsb into the destination, sign
1530 extending the result. This is an alias for:
1531
1532 SBFM rd, rn, #lsb, #(lsb + width - 1)
1533
1534 RD is the destination register.
1535 RN is the source register.
1536 LSB is the bit number to start at (least significant bit).
1537 WIDTH is the number of bits to move. */
1538
1539static int
1540emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1541 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1542{
1543 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1544}
1545
1546/* Write a UBFM instruction into *BUF.
1547
1548 UBFM rd, rn, #immr, #imms
1549
1550 This instruction moves the bits from #immr to #imms into the
1551 destination, extending the result with zeros.
1552
1553 RD is the destination register.
1554 RN is the source register.
1555 IMMR is the bit number to start at (least significant bit).
1556 IMMS is the bit number to stop at (most significant bit). */
1557
1558static int
1559emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1560 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1561{
1562 uint32_t size = ENCODE (rd.is64, 1, 31);
1563 uint32_t n = ENCODE (rd.is64, 1, 22);
1564
e1c587c3
YQ
1565 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1566 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1567 | ENCODE (rd.num, 5, 0));
afbe19f8
PL
1568}
1569
1570/* Write a UBFX instruction into *BUF.
1571
1572 UBFX rd, rn, #lsb, #width
1573
1574 This instruction moves #width bits from #lsb into the destination,
1575 extending the result with zeros. This is an alias for:
1576
1577 UBFM rd, rn, #lsb, #(lsb + width - 1)
1578
1579 RD is the destination register.
1580 RN is the source register.
1581 LSB is the bit number to start at (least significant bit).
1582 WIDTH is the number of bits to move. */
1583
1584static int
1585emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1586 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1587{
1588 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1589}
1590
1591/* Write a CSINC instruction into *BUF.
1592
1593 CSINC rd, rn, rm, cond
1594
1595 This instruction conditionally increments rn or rm and places the result
1596 in rd. rn is chosen is the condition is true.
1597
1598 RD is the destination register.
1599 RN and RM are the source registers.
1600 COND is the encoded condition. */
1601
1602static int
1603emit_csinc (uint32_t *buf, struct aarch64_register rd,
1604 struct aarch64_register rn, struct aarch64_register rm,
1605 unsigned cond)
1606{
1607 uint32_t size = ENCODE (rd.is64, 1, 31);
1608
e1c587c3
YQ
1609 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1610 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1611 | ENCODE (rd.num, 5, 0));
afbe19f8
PL
1612}
1613
1614/* Write a CSET instruction into *BUF.
1615
1616 CSET rd, cond
1617
1618 This instruction conditionally write 1 or 0 in the destination register.
1619 1 is written if the condition is true. This is an alias for:
1620
1621 CSINC rd, xzr, xzr, !cond
1622
1623 Note that the condition needs to be inverted.
1624
1625 RD is the destination register.
1626 RN and RM are the source registers.
1627 COND is the encoded condition. */
1628
1629static int
1630emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1631{
1632 /* The least significant bit of the condition needs toggling in order to
1633 invert it. */
1634 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1635}
1636
bb903df0
PL
1637/* Write LEN instructions from BUF into the inferior memory at *TO.
1638
1639 Note instructions are always little endian on AArch64, unlike data. */
1640
1641static void
1642append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1643{
1644 size_t byte_len = len * sizeof (uint32_t);
1645#if (__BYTE_ORDER == __BIG_ENDIAN)
cb93dc7f 1646 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
bb903df0
PL
1647 size_t i;
1648
1649 for (i = 0; i < len; i++)
1650 le_buf[i] = htole32 (buf[i]);
1651
1652 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1653
1654 xfree (le_buf);
1655#else
1656 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1657#endif
1658
1659 *to += byte_len;
1660}
1661
0badd99f
YQ
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  /* Must be first so this struct can be passed where a
     struct aarch64_insn_data * is expected.  */
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
1676
1677/* Implementation of aarch64_insn_visitor method "b". */
1678
1679static void
1680aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1681 struct aarch64_insn_data *data)
1682{
1683 struct aarch64_insn_relocation_data *insn_reloc
1684 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1685 int64_t new_offset
0badd99f
YQ
1686 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1687
1688 if (can_encode_int32 (new_offset, 28))
1689 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1690}
1691
1692/* Implementation of aarch64_insn_visitor method "b_cond". */
1693
1694static void
1695aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1696 struct aarch64_insn_data *data)
1697{
1698 struct aarch64_insn_relocation_data *insn_reloc
1699 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1700 int64_t new_offset
0badd99f
YQ
1701 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1702
1703 if (can_encode_int32 (new_offset, 21))
1704 {
1705 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1706 new_offset);
bb903df0 1707 }
0badd99f 1708 else if (can_encode_int32 (new_offset, 28))
bb903df0 1709 {
0badd99f
YQ
1710 /* The offset is out of range for a conditional branch
1711 instruction but not for a unconditional branch. We can use
1712 the following instructions instead:
bb903df0 1713
0badd99f
YQ
1714 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1715 B NOT_TAKEN ; Else jump over TAKEN and continue.
1716 TAKEN:
1717 B #(offset - 8)
1718 NOT_TAKEN:
1719
1720 */
1721
1722 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1723 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1724 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1725 }
0badd99f 1726}
bb903df0 1727
0badd99f
YQ
1728/* Implementation of aarch64_insn_visitor method "cb". */
1729
1730static void
1731aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1732 const unsigned rn, int is64,
1733 struct aarch64_insn_data *data)
1734{
1735 struct aarch64_insn_relocation_data *insn_reloc
1736 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1737 int64_t new_offset
0badd99f
YQ
1738 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1739
1740 if (can_encode_int32 (new_offset, 21))
1741 {
1742 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1743 aarch64_register (rn, is64), new_offset);
bb903df0 1744 }
0badd99f 1745 else if (can_encode_int32 (new_offset, 28))
bb903df0 1746 {
0badd99f
YQ
1747 /* The offset is out of range for a compare and branch
1748 instruction but not for a unconditional branch. We can use
1749 the following instructions instead:
1750
1751 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1752 B NOT_TAKEN ; Else jump over TAKEN and continue.
1753 TAKEN:
1754 B #(offset - 8)
1755 NOT_TAKEN:
1756
1757 */
1758 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1759 aarch64_register (rn, is64), 8);
1760 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1761 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1762 }
1763}
bb903df0 1764
0badd99f 1765/* Implementation of aarch64_insn_visitor method "tb". */
bb903df0 1766
0badd99f
YQ
1767static void
1768aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1769 const unsigned rt, unsigned bit,
1770 struct aarch64_insn_data *data)
1771{
1772 struct aarch64_insn_relocation_data *insn_reloc
1773 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1774 int64_t new_offset
0badd99f
YQ
1775 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1776
1777 if (can_encode_int32 (new_offset, 16))
1778 {
1779 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1780 aarch64_register (rt, 1), new_offset);
bb903df0 1781 }
0badd99f 1782 else if (can_encode_int32 (new_offset, 28))
bb903df0 1783 {
0badd99f
YQ
1784 /* The offset is out of range for a test bit and branch
1785 instruction but not for a unconditional branch. We can use
1786 the following instructions instead:
1787
1788 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1789 B NOT_TAKEN ; Else jump over TAKEN and continue.
1790 TAKEN:
1791 B #(offset - 8)
1792 NOT_TAKEN:
1793
1794 */
1795 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1796 aarch64_register (rt, 1), 8);
1797 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1798 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1799 new_offset - 8);
1800 }
1801}
bb903df0 1802
0badd99f 1803/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1804
0badd99f
YQ
1805static void
1806aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1807 const int is_adrp,
1808 struct aarch64_insn_data *data)
1809{
1810 struct aarch64_insn_relocation_data *insn_reloc
1811 = (struct aarch64_insn_relocation_data *) data;
1812 /* We know exactly the address the ADR{P,} instruction will compute.
1813 We can just write it to the destination register. */
1814 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1815
0badd99f
YQ
1816 if (is_adrp)
1817 {
1818 /* Clear the lower 12 bits of the offset to get the 4K page. */
1819 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1820 aarch64_register (rd, 1),
1821 address & ~0xfff);
1822 }
1823 else
1824 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1825 aarch64_register (rd, 1), address);
1826}
bb903df0 1827
0badd99f 1828/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1829
0badd99f
YQ
1830static void
1831aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1832 const unsigned rt, const int is64,
1833 struct aarch64_insn_data *data)
1834{
1835 struct aarch64_insn_relocation_data *insn_reloc
1836 = (struct aarch64_insn_relocation_data *) data;
1837 CORE_ADDR address = data->insn_addr + offset;
1838
1839 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1840 aarch64_register (rt, 1), address);
1841
1842 /* We know exactly what address to load from, and what register we
1843 can use:
1844
1845 MOV xd, #(oldloc + offset)
1846 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1847 ...
1848
1849 LDR xd, [xd] ; or LDRSW xd, [xd]
1850
1851 */
1852
1853 if (is_sw)
1854 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1855 aarch64_register (rt, 1),
1856 aarch64_register (rt, 1),
1857 offset_memory_operand (0));
bb903df0 1858 else
0badd99f
YQ
1859 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1860 aarch64_register (rt, is64),
1861 aarch64_register (rt, 1),
1862 offset_memory_operand (0));
1863}
1864
1865/* Implementation of aarch64_insn_visitor method "others". */
1866
1867static void
1868aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1869 struct aarch64_insn_data *data)
1870{
1871 struct aarch64_insn_relocation_data *insn_reloc
1872 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1873
0badd99f
YQ
1874 /* The instruction is not PC relative. Just re-emit it at the new
1875 location. */
e1c587c3 1876 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
0badd99f
YQ
1877}
1878
/* Visitor used when relocating instructions out of line for fast
   tracepoints.  The member order must match struct
   aarch64_insn_visitor.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
1889
bb903df0
PL
1890/* Implementation of linux_target_ops method
1891 "install_fast_tracepoint_jump_pad". */
1892
1893static int
1894aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1895 CORE_ADDR tpaddr,
1896 CORE_ADDR collector,
1897 CORE_ADDR lockaddr,
1898 ULONGEST orig_size,
1899 CORE_ADDR *jump_entry,
1900 CORE_ADDR *trampoline,
1901 ULONGEST *trampoline_size,
1902 unsigned char *jjump_pad_insn,
1903 ULONGEST *jjump_pad_insn_size,
1904 CORE_ADDR *adjusted_insn_addr,
1905 CORE_ADDR *adjusted_insn_addr_end,
1906 char *err)
1907{
1908 uint32_t buf[256];
1909 uint32_t *p = buf;
2ac09a5b 1910 int64_t offset;
bb903df0 1911 int i;
70b439f0 1912 uint32_t insn;
bb903df0 1913 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1914 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1915
1916 /* We need to save the current state on the stack both to restore it
1917 later and to collect register values when the tracepoint is hit.
1918
1919 The saved registers are pushed in a layout that needs to be in sync
1920 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1921 the supply_fast_tracepoint_registers function will fill in the
1922 register cache from a pointer to saved registers on the stack we build
1923 here.
1924
1925 For simplicity, we set the size of each cell on the stack to 16 bytes.
1926 This way one cell can hold any register type, from system registers
1927 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1928 has to be 16 bytes aligned anyway.
1929
1930 Note that the CPSR register does not exist on AArch64. Instead we
1931 can access system bits describing the process state with the
1932 MRS/MSR instructions, namely the condition flags. We save them as
1933 if they are part of a CPSR register because that's how GDB
1934 interprets these system bits. At the moment, only the condition
1935 flags are saved in CPSR (NZCV).
1936
1937 Stack layout, each cell is 16 bytes (descending):
1938
1939 High *-------- SIMD&FP registers from 31 down to 0. --------*
1940 | q31 |
1941 . .
1942 . . 32 cells
1943 . .
1944 | q0 |
1945 *---- General purpose registers from 30 down to 0. ----*
1946 | x30 |
1947 . .
1948 . . 31 cells
1949 . .
1950 | x0 |
1951 *------------- Special purpose registers. -------------*
1952 | SP |
1953 | PC |
1954 | CPSR (NZCV) | 5 cells
1955 | FPSR |
1956 | FPCR | <- SP + 16
1957 *------------- collecting_t object --------------------*
1958 | TPIDR_EL0 | struct tracepoint * |
1959 Low *------------------------------------------------------*
1960
1961 After this stack is set up, we issue a call to the collector, passing
1962 it the saved registers at (SP + 16). */
1963
1964 /* Push SIMD&FP registers on the stack:
1965
1966 SUB sp, sp, #(32 * 16)
1967
1968 STP q30, q31, [sp, #(30 * 16)]
1969 ...
1970 STP q0, q1, [sp]
1971
1972 */
1973 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1974 for (i = 30; i >= 0; i -= 2)
1975 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1976
1977 /* Push general puspose registers on the stack. Note that we do not need
1978 to push x31 as it represents the xzr register and not the stack
1979 pointer in a STR instruction.
1980
1981 SUB sp, sp, #(31 * 16)
1982
1983 STR x30, [sp, #(30 * 16)]
1984 ...
1985 STR x0, [sp]
1986
1987 */
1988 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1989 for (i = 30; i >= 0; i -= 1)
1990 p += emit_str (p, aarch64_register (i, 1), sp,
1991 offset_memory_operand (i * 16));
1992
1993 /* Make space for 5 more cells.
1994
1995 SUB sp, sp, #(5 * 16)
1996
1997 */
1998 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1999
2000
2001 /* Save SP:
2002
2003 ADD x4, sp, #((32 + 31 + 5) * 16)
2004 STR x4, [sp, #(4 * 16)]
2005
2006 */
2007 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2008 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2009
2010 /* Save PC (tracepoint address):
2011
2012 MOV x3, #(tpaddr)
2013 ...
2014
2015 STR x3, [sp, #(3 * 16)]
2016
2017 */
2018
2019 p += emit_mov_addr (p, x3, tpaddr);
2020 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2021
2022 /* Save CPSR (NZCV), FPSR and FPCR:
2023
2024 MRS x2, nzcv
2025 MRS x1, fpsr
2026 MRS x0, fpcr
2027
2028 STR x2, [sp, #(2 * 16)]
2029 STR x1, [sp, #(1 * 16)]
2030 STR x0, [sp, #(0 * 16)]
2031
2032 */
2033 p += emit_mrs (p, x2, NZCV);
2034 p += emit_mrs (p, x1, FPSR);
2035 p += emit_mrs (p, x0, FPCR);
2036 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2037 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2038 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2039
2040 /* Push the collecting_t object. It consist of the address of the
2041 tracepoint and an ID for the current thread. We get the latter by
2042 reading the tpidr_el0 system register. It corresponds to the
2043 NT_ARM_TLS register accessible with ptrace.
2044
2045 MOV x0, #(tpoint)
2046 ...
2047
2048 MRS x1, tpidr_el0
2049
2050 STP x0, x1, [sp, #-16]!
2051
2052 */
2053
2054 p += emit_mov_addr (p, x0, tpoint);
2055 p += emit_mrs (p, x1, TPIDR_EL0);
2056 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2057
2058 /* Spin-lock:
2059
2060 The shared memory for the lock is at lockaddr. It will hold zero
2061 if no-one is holding the lock, otherwise it contains the address of
2062 the collecting_t object on the stack of the thread which acquired it.
2063
2064 At this stage, the stack pointer points to this thread's collecting_t
2065 object.
2066
2067 We use the following registers:
2068 - x0: Address of the lock.
2069 - x1: Pointer to collecting_t object.
2070 - x2: Scratch register.
2071
2072 MOV x0, #(lockaddr)
2073 ...
2074 MOV x1, sp
2075
2076 ; Trigger an event local to this core. So the following WFE
2077 ; instruction is ignored.
2078 SEVL
2079 again:
2080 ; Wait for an event. The event is triggered by either the SEVL
2081 ; or STLR instructions (store release).
2082 WFE
2083
2084 ; Atomically read at lockaddr. This marks the memory location as
2085 ; exclusive. This instruction also has memory constraints which
2086 ; make sure all previous data reads and writes are done before
2087 ; executing it.
2088 LDAXR x2, [x0]
2089
2090 ; Try again if another thread holds the lock.
2091 CBNZ x2, again
2092
2093 ; We can lock it! Write the address of the collecting_t object.
2094 ; This instruction will fail if the memory location is not marked
2095 ; as exclusive anymore. If it succeeds, it will remove the
2096 ; exclusive mark on the memory location. This way, if another
2097 ; thread executes this instruction before us, we will fail and try
2098 ; all over again.
2099 STXR w2, x1, [x0]
2100 CBNZ w2, again
2101
2102 */
2103
2104 p += emit_mov_addr (p, x0, lockaddr);
2105 p += emit_mov (p, x1, register_operand (sp));
2106
2107 p += emit_sevl (p);
2108 p += emit_wfe (p);
2109 p += emit_ldaxr (p, x2, x0);
2110 p += emit_cb (p, 1, w2, -2 * 4);
2111 p += emit_stxr (p, w2, x1, x0);
2112 p += emit_cb (p, 1, x2, -4 * 4);
2113
2114 /* Call collector (struct tracepoint *, unsigned char *):
2115
2116 MOV x0, #(tpoint)
2117 ...
2118
2119 ; Saved registers start after the collecting_t object.
2120 ADD x1, sp, #16
2121
2122 ; We use an intra-procedure-call scratch register.
2123 MOV ip0, #(collector)
2124 ...
2125
2126 ; And call back to C!
2127 BLR ip0
2128
2129 */
2130
2131 p += emit_mov_addr (p, x0, tpoint);
2132 p += emit_add (p, x1, sp, immediate_operand (16));
2133
2134 p += emit_mov_addr (p, ip0, collector);
2135 p += emit_blr (p, ip0);
2136
2137 /* Release the lock.
2138
2139 MOV x0, #(lockaddr)
2140 ...
2141
2142 ; This instruction is a normal store with memory ordering
2143 ; constraints. Thanks to this we do not have to put a data
2144 ; barrier instruction to make sure all data read and writes are done
2145 ; before this instruction is executed. Furthermore, this instrucion
2146 ; will trigger an event, letting other threads know they can grab
2147 ; the lock.
2148 STLR xzr, [x0]
2149
2150 */
2151 p += emit_mov_addr (p, x0, lockaddr);
2152 p += emit_stlr (p, xzr, x0);
2153
2154 /* Free collecting_t object:
2155
2156 ADD sp, sp, #16
2157
2158 */
2159 p += emit_add (p, sp, sp, immediate_operand (16));
2160
2161 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2162 registers from the stack.
2163
2164 LDR x2, [sp, #(2 * 16)]
2165 LDR x1, [sp, #(1 * 16)]
2166 LDR x0, [sp, #(0 * 16)]
2167
2168 MSR NZCV, x2
2169 MSR FPSR, x1
2170 MSR FPCR, x0
2171
2172 ADD sp, sp #(5 * 16)
2173
2174 */
2175 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2176 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2177 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2178 p += emit_msr (p, NZCV, x2);
2179 p += emit_msr (p, FPSR, x1);
2180 p += emit_msr (p, FPCR, x0);
2181
2182 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2183
2184 /* Pop general purpose registers:
2185
2186 LDR x0, [sp]
2187 ...
2188 LDR x30, [sp, #(30 * 16)]
2189
2190 ADD sp, sp, #(31 * 16)
2191
2192 */
2193 for (i = 0; i <= 30; i += 1)
2194 p += emit_ldr (p, aarch64_register (i, 1), sp,
2195 offset_memory_operand (i * 16));
2196 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2197
2198 /* Pop SIMD&FP registers:
2199
2200 LDP q0, q1, [sp]
2201 ...
2202 LDP q30, q31, [sp, #(30 * 16)]
2203
2204 ADD sp, sp, #(32 * 16)
2205
2206 */
2207 for (i = 0; i <= 30; i += 2)
2208 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2209 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2210
2211 /* Write the code into the inferior memory. */
2212 append_insns (&buildaddr, p - buf, buf);
2213
2214 /* Now emit the relocated instruction. */
2215 *adjusted_insn_addr = buildaddr;
70b439f0 2216 target_read_uint32 (tpaddr, &insn);
0badd99f
YQ
2217
2218 insn_data.base.insn_addr = tpaddr;
2219 insn_data.new_addr = buildaddr;
2220 insn_data.insn_ptr = buf;
2221
2222 aarch64_relocate_instruction (insn, &visitor,
2223 (struct aarch64_insn_data *) &insn_data);
2224
bb903df0 2225 /* We may not have been able to relocate the instruction. */
0badd99f 2226 if (insn_data.insn_ptr == buf)
bb903df0
PL
2227 {
2228 sprintf (err,
2229 "E.Could not relocate instruction from %s to %s.",
2230 core_addr_to_string_nz (tpaddr),
2231 core_addr_to_string_nz (buildaddr));
2232 return 1;
2233 }
dfaffe9d 2234 else
0badd99f 2235 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2236 *adjusted_insn_addr_end = buildaddr;
bb903df0
PL
2237
2238 /* Go back to the start of the buffer. */
2239 p = buf;
2240
2241 /* Emit a branch back from the jump pad. */
2242 offset = (tpaddr + orig_size - buildaddr);
2243 if (!can_encode_int32 (offset, 28))
2244 {
2245 sprintf (err,
2246 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2247 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
bb903df0
PL
2248 offset);
2249 return 1;
2250 }
2251
2252 p += emit_b (p, 0, offset);
2253 append_insns (&buildaddr, p - buf, buf);
2254
2255 /* Give the caller a branch instruction into the jump pad. */
2256 offset = (*jump_entry - tpaddr);
2257 if (!can_encode_int32 (offset, 28))
2258 {
2259 sprintf (err,
2260 "E.Jump pad too far from tracepoint "
2ac09a5b 2261 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
bb903df0
PL
2262 offset);
2263 return 1;
2264 }
2265
2266 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2267 *jjump_pad_insn_size = 4;
2268
2269 /* Return the end address of our pad. */
2270 *jump_entry = buildaddr;
2271
2272 return 0;
2273}
2274
afbe19f8
PL
2275/* Helper function writing LEN instructions from START into
2276 current_insn_ptr. */
2277
2278static void
2279emit_ops_insns (const uint32_t *start, int len)
2280{
2281 CORE_ADDR buildaddr = current_insn_ptr;
2282
2283 if (debug_threads)
2284 debug_printf ("Adding %d instrucions at %s\n",
2285 len, paddress (buildaddr));
2286
2287 append_insns (&buildaddr, len, start);
2288 current_insn_ptr = buildaddr;
2289}
2290
2291/* Pop a register from the stack. */
2292
2293static int
2294emit_pop (uint32_t *buf, struct aarch64_register rt)
2295{
2296 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2297}
2298
2299/* Push a register on the stack. */
2300
2301static int
2302emit_push (uint32_t *buf, struct aarch64_register rt)
2303{
2304 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2305}
2306
2307/* Implementation of emit_ops method "emit_prologue". */
2308
2309static void
2310aarch64_emit_prologue (void)
2311{
2312 uint32_t buf[16];
2313 uint32_t *p = buf;
2314
2315 /* This function emit a prologue for the following function prototype:
2316
2317 enum eval_result_type f (unsigned char *regs,
2318 ULONGEST *value);
2319
2320 The first argument is a buffer of raw registers. The second
2321 argument is the result of
2322 evaluating the expression, which will be set to whatever is on top of
2323 the stack at the end.
2324
2325 The stack set up by the prologue is as such:
2326
2327 High *------------------------------------------------------*
2328 | LR |
2329 | FP | <- FP
2330 | x1 (ULONGEST *value) |
2331 | x0 (unsigned char *regs) |
2332 Low *------------------------------------------------------*
2333
2334 As we are implementing a stack machine, each opcode can expand the
2335 stack so we never know how far we are from the data saved by this
2336 prologue. In order to be able refer to value and regs later, we save
2337 the current stack pointer in the frame pointer. This way, it is not
2338 clobbered when calling C functions.
2339
2340 Finally, throughtout every operation, we are using register x0 as the
2341 top of the stack, and x1 as a scratch register. */
2342
2343 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2344 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2345 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2346
2347 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2348
2349
2350 emit_ops_insns (buf, p - buf);
2351}
2352
2353/* Implementation of emit_ops method "emit_epilogue". */
2354
2355static void
2356aarch64_emit_epilogue (void)
2357{
2358 uint32_t buf[16];
2359 uint32_t *p = buf;
2360
2361 /* Store the result of the expression (x0) in *value. */
2362 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2363 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2364 p += emit_str (p, x0, x1, offset_memory_operand (0));
2365
2366 /* Restore the previous state. */
2367 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2368 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2369
2370 /* Return expr_eval_no_error. */
2371 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2372 p += emit_ret (p, lr);
2373
2374 emit_ops_insns (buf, p - buf);
2375}
2376
2377/* Implementation of emit_ops method "emit_add". */
2378
2379static void
2380aarch64_emit_add (void)
2381{
2382 uint32_t buf[16];
2383 uint32_t *p = buf;
2384
2385 p += emit_pop (p, x1);
45e3745e 2386 p += emit_add (p, x0, x1, register_operand (x0));
afbe19f8
PL
2387
2388 emit_ops_insns (buf, p - buf);
2389}
2390
2391/* Implementation of emit_ops method "emit_sub". */
2392
2393static void
2394aarch64_emit_sub (void)
2395{
2396 uint32_t buf[16];
2397 uint32_t *p = buf;
2398
2399 p += emit_pop (p, x1);
45e3745e 2400 p += emit_sub (p, x0, x1, register_operand (x0));
afbe19f8
PL
2401
2402 emit_ops_insns (buf, p - buf);
2403}
2404
2405/* Implementation of emit_ops method "emit_mul". */
2406
2407static void
2408aarch64_emit_mul (void)
2409{
2410 uint32_t buf[16];
2411 uint32_t *p = buf;
2412
2413 p += emit_pop (p, x1);
2414 p += emit_mul (p, x0, x1, x0);
2415
2416 emit_ops_insns (buf, p - buf);
2417}
2418
2419/* Implementation of emit_ops method "emit_lsh". */
2420
2421static void
2422aarch64_emit_lsh (void)
2423{
2424 uint32_t buf[16];
2425 uint32_t *p = buf;
2426
2427 p += emit_pop (p, x1);
2428 p += emit_lslv (p, x0, x1, x0);
2429
2430 emit_ops_insns (buf, p - buf);
2431}
2432
2433/* Implementation of emit_ops method "emit_rsh_signed". */
2434
2435static void
2436aarch64_emit_rsh_signed (void)
2437{
2438 uint32_t buf[16];
2439 uint32_t *p = buf;
2440
2441 p += emit_pop (p, x1);
2442 p += emit_asrv (p, x0, x1, x0);
2443
2444 emit_ops_insns (buf, p - buf);
2445}
2446
2447/* Implementation of emit_ops method "emit_rsh_unsigned". */
2448
2449static void
2450aarch64_emit_rsh_unsigned (void)
2451{
2452 uint32_t buf[16];
2453 uint32_t *p = buf;
2454
2455 p += emit_pop (p, x1);
2456 p += emit_lsrv (p, x0, x1, x0);
2457
2458 emit_ops_insns (buf, p - buf);
2459}
2460
2461/* Implementation of emit_ops method "emit_ext". */
2462
2463static void
2464aarch64_emit_ext (int arg)
2465{
2466 uint32_t buf[16];
2467 uint32_t *p = buf;
2468
2469 p += emit_sbfx (p, x0, x0, 0, arg);
2470
2471 emit_ops_insns (buf, p - buf);
2472}
2473
2474/* Implementation of emit_ops method "emit_log_not". */
2475
2476static void
2477aarch64_emit_log_not (void)
2478{
2479 uint32_t buf[16];
2480 uint32_t *p = buf;
2481
2482 /* If the top of the stack is 0, replace it with 1. Else replace it with
2483 0. */
2484
2485 p += emit_cmp (p, x0, immediate_operand (0));
2486 p += emit_cset (p, x0, EQ);
2487
2488 emit_ops_insns (buf, p - buf);
2489}
2490
2491/* Implementation of emit_ops method "emit_bit_and". */
2492
2493static void
2494aarch64_emit_bit_and (void)
2495{
2496 uint32_t buf[16];
2497 uint32_t *p = buf;
2498
2499 p += emit_pop (p, x1);
2500 p += emit_and (p, x0, x0, x1);
2501
2502 emit_ops_insns (buf, p - buf);
2503}
2504
2505/* Implementation of emit_ops method "emit_bit_or". */
2506
2507static void
2508aarch64_emit_bit_or (void)
2509{
2510 uint32_t buf[16];
2511 uint32_t *p = buf;
2512
2513 p += emit_pop (p, x1);
2514 p += emit_orr (p, x0, x0, x1);
2515
2516 emit_ops_insns (buf, p - buf);
2517}
2518
2519/* Implementation of emit_ops method "emit_bit_xor". */
2520
2521static void
2522aarch64_emit_bit_xor (void)
2523{
2524 uint32_t buf[16];
2525 uint32_t *p = buf;
2526
2527 p += emit_pop (p, x1);
2528 p += emit_eor (p, x0, x0, x1);
2529
2530 emit_ops_insns (buf, p - buf);
2531}
2532
2533/* Implementation of emit_ops method "emit_bit_not". */
2534
2535static void
2536aarch64_emit_bit_not (void)
2537{
2538 uint32_t buf[16];
2539 uint32_t *p = buf;
2540
2541 p += emit_mvn (p, x0, x0);
2542
2543 emit_ops_insns (buf, p - buf);
2544}
2545
2546/* Implementation of emit_ops method "emit_equal". */
2547
2548static void
2549aarch64_emit_equal (void)
2550{
2551 uint32_t buf[16];
2552 uint32_t *p = buf;
2553
2554 p += emit_pop (p, x1);
2555 p += emit_cmp (p, x0, register_operand (x1));
2556 p += emit_cset (p, x0, EQ);
2557
2558 emit_ops_insns (buf, p - buf);
2559}
2560
2561/* Implementation of emit_ops method "emit_less_signed". */
2562
2563static void
2564aarch64_emit_less_signed (void)
2565{
2566 uint32_t buf[16];
2567 uint32_t *p = buf;
2568
2569 p += emit_pop (p, x1);
2570 p += emit_cmp (p, x1, register_operand (x0));
2571 p += emit_cset (p, x0, LT);
2572
2573 emit_ops_insns (buf, p - buf);
2574}
2575
2576/* Implementation of emit_ops method "emit_less_unsigned". */
2577
2578static void
2579aarch64_emit_less_unsigned (void)
2580{
2581 uint32_t buf[16];
2582 uint32_t *p = buf;
2583
2584 p += emit_pop (p, x1);
2585 p += emit_cmp (p, x1, register_operand (x0));
2586 p += emit_cset (p, x0, LO);
2587
2588 emit_ops_insns (buf, p - buf);
2589}
2590
2591/* Implementation of emit_ops method "emit_ref". */
2592
2593static void
2594aarch64_emit_ref (int size)
2595{
2596 uint32_t buf[16];
2597 uint32_t *p = buf;
2598
2599 switch (size)
2600 {
2601 case 1:
2602 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2603 break;
2604 case 2:
2605 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2606 break;
2607 case 4:
2608 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2609 break;
2610 case 8:
2611 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2612 break;
2613 default:
2614 /* Unknown size, bail on compilation. */
2615 emit_error = 1;
2616 break;
2617 }
2618
2619 emit_ops_insns (buf, p - buf);
2620}
2621
2622/* Implementation of emit_ops method "emit_if_goto". */
2623
2624static void
2625aarch64_emit_if_goto (int *offset_p, int *size_p)
2626{
2627 uint32_t buf[16];
2628 uint32_t *p = buf;
2629
2630 /* The Z flag is set or cleared here. */
2631 p += emit_cmp (p, x0, immediate_operand (0));
2632 /* This instruction must not change the Z flag. */
2633 p += emit_pop (p, x0);
2634 /* Branch over the next instruction if x0 == 0. */
2635 p += emit_bcond (p, EQ, 8);
2636
2637 /* The NOP instruction will be patched with an unconditional branch. */
2638 if (offset_p)
2639 *offset_p = (p - buf) * 4;
2640 if (size_p)
2641 *size_p = 4;
2642 p += emit_nop (p);
2643
2644 emit_ops_insns (buf, p - buf);
2645}
2646
2647/* Implementation of emit_ops method "emit_goto". */
2648
2649static void
2650aarch64_emit_goto (int *offset_p, int *size_p)
2651{
2652 uint32_t buf[16];
2653 uint32_t *p = buf;
2654
2655 /* The NOP instruction will be patched with an unconditional branch. */
2656 if (offset_p)
2657 *offset_p = 0;
2658 if (size_p)
2659 *size_p = 4;
2660 p += emit_nop (p);
2661
2662 emit_ops_insns (buf, p - buf);
2663}
2664
2665/* Implementation of emit_ops method "write_goto_address". */
2666
2667void
2668aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2669{
2670 uint32_t insn;
2671
2672 emit_b (&insn, 0, to - from);
2673 append_insns (&from, 1, &insn);
2674}
2675
2676/* Implementation of emit_ops method "emit_const". */
2677
2678static void
2679aarch64_emit_const (LONGEST num)
2680{
2681 uint32_t buf[16];
2682 uint32_t *p = buf;
2683
2684 p += emit_mov_addr (p, x0, num);
2685
2686 emit_ops_insns (buf, p - buf);
2687}
2688
2689/* Implementation of emit_ops method "emit_call". */
2690
2691static void
2692aarch64_emit_call (CORE_ADDR fn)
2693{
2694 uint32_t buf[16];
2695 uint32_t *p = buf;
2696
2697 p += emit_mov_addr (p, ip0, fn);
2698 p += emit_blr (p, ip0);
2699
2700 emit_ops_insns (buf, p - buf);
2701}
2702
2703/* Implementation of emit_ops method "emit_reg". */
2704
2705static void
2706aarch64_emit_reg (int reg)
2707{
2708 uint32_t buf[16];
2709 uint32_t *p = buf;
2710
2711 /* Set x0 to unsigned char *regs. */
2712 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2713 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2714 p += emit_mov (p, x1, immediate_operand (reg));
2715
2716 emit_ops_insns (buf, p - buf);
2717
2718 aarch64_emit_call (get_raw_reg_func_addr ());
2719}
2720
2721/* Implementation of emit_ops method "emit_pop". */
2722
2723static void
2724aarch64_emit_pop (void)
2725{
2726 uint32_t buf[16];
2727 uint32_t *p = buf;
2728
2729 p += emit_pop (p, x0);
2730
2731 emit_ops_insns (buf, p - buf);
2732}
2733
2734/* Implementation of emit_ops method "emit_stack_flush". */
2735
2736static void
2737aarch64_emit_stack_flush (void)
2738{
2739 uint32_t buf[16];
2740 uint32_t *p = buf;
2741
2742 p += emit_push (p, x0);
2743
2744 emit_ops_insns (buf, p - buf);
2745}
2746
2747/* Implementation of emit_ops method "emit_zero_ext". */
2748
2749static void
2750aarch64_emit_zero_ext (int arg)
2751{
2752 uint32_t buf[16];
2753 uint32_t *p = buf;
2754
2755 p += emit_ubfx (p, x0, x0, 0, arg);
2756
2757 emit_ops_insns (buf, p - buf);
2758}
2759
2760/* Implementation of emit_ops method "emit_swap". */
2761
2762static void
2763aarch64_emit_swap (void)
2764{
2765 uint32_t buf[16];
2766 uint32_t *p = buf;
2767
2768 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2769 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2770 p += emit_mov (p, x0, register_operand (x1));
2771
2772 emit_ops_insns (buf, p - buf);
2773}
2774
2775/* Implementation of emit_ops method "emit_stack_adjust". */
2776
2777static void
2778aarch64_emit_stack_adjust (int n)
2779{
2780 /* This is not needed with our design. */
2781 uint32_t buf[16];
2782 uint32_t *p = buf;
2783
2784 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2785
2786 emit_ops_insns (buf, p - buf);
2787}
2788
2789/* Implementation of emit_ops method "emit_int_call_1". */
2790
2791static void
2792aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2793{
2794 uint32_t buf[16];
2795 uint32_t *p = buf;
2796
2797 p += emit_mov (p, x0, immediate_operand (arg1));
2798
2799 emit_ops_insns (buf, p - buf);
2800
2801 aarch64_emit_call (fn);
2802}
2803
2804/* Implementation of emit_ops method "emit_void_call_2". */
2805
2806static void
2807aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2808{
2809 uint32_t buf[16];
2810 uint32_t *p = buf;
2811
2812 /* Push x0 on the stack. */
2813 aarch64_emit_stack_flush ();
2814
2815 /* Setup arguments for the function call:
2816
2817 x0: arg1
2818 x1: top of the stack
2819
2820 MOV x1, x0
2821 MOV x0, #arg1 */
2822
2823 p += emit_mov (p, x1, register_operand (x0));
2824 p += emit_mov (p, x0, immediate_operand (arg1));
2825
2826 emit_ops_insns (buf, p - buf);
2827
2828 aarch64_emit_call (fn);
2829
2830 /* Restore x0. */
2831 aarch64_emit_pop ();
2832}
2833
2834/* Implementation of emit_ops method "emit_eq_goto". */
2835
2836static void
2837aarch64_emit_eq_goto (int *offset_p, int *size_p)
2838{
2839 uint32_t buf[16];
2840 uint32_t *p = buf;
2841
2842 p += emit_pop (p, x1);
2843 p += emit_cmp (p, x1, register_operand (x0));
2844 /* Branch over the next instruction if x0 != x1. */
2845 p += emit_bcond (p, NE, 8);
2846 /* The NOP instruction will be patched with an unconditional branch. */
2847 if (offset_p)
2848 *offset_p = (p - buf) * 4;
2849 if (size_p)
2850 *size_p = 4;
2851 p += emit_nop (p);
2852
2853 emit_ops_insns (buf, p - buf);
2854}
2855
2856/* Implementation of emit_ops method "emit_ne_goto". */
2857
2858static void
2859aarch64_emit_ne_goto (int *offset_p, int *size_p)
2860{
2861 uint32_t buf[16];
2862 uint32_t *p = buf;
2863
2864 p += emit_pop (p, x1);
2865 p += emit_cmp (p, x1, register_operand (x0));
2866 /* Branch over the next instruction if x0 == x1. */
2867 p += emit_bcond (p, EQ, 8);
2868 /* The NOP instruction will be patched with an unconditional branch. */
2869 if (offset_p)
2870 *offset_p = (p - buf) * 4;
2871 if (size_p)
2872 *size_p = 4;
2873 p += emit_nop (p);
2874
2875 emit_ops_insns (buf, p - buf);
2876}
2877
2878/* Implementation of emit_ops method "emit_lt_goto". */
2879
2880static void
2881aarch64_emit_lt_goto (int *offset_p, int *size_p)
2882{
2883 uint32_t buf[16];
2884 uint32_t *p = buf;
2885
2886 p += emit_pop (p, x1);
2887 p += emit_cmp (p, x1, register_operand (x0));
2888 /* Branch over the next instruction if x0 >= x1. */
2889 p += emit_bcond (p, GE, 8);
2890 /* The NOP instruction will be patched with an unconditional branch. */
2891 if (offset_p)
2892 *offset_p = (p - buf) * 4;
2893 if (size_p)
2894 *size_p = 4;
2895 p += emit_nop (p);
2896
2897 emit_ops_insns (buf, p - buf);
2898}
2899
2900/* Implementation of emit_ops method "emit_le_goto". */
2901
2902static void
2903aarch64_emit_le_goto (int *offset_p, int *size_p)
2904{
2905 uint32_t buf[16];
2906 uint32_t *p = buf;
2907
2908 p += emit_pop (p, x1);
2909 p += emit_cmp (p, x1, register_operand (x0));
2910 /* Branch over the next instruction if x0 > x1. */
2911 p += emit_bcond (p, GT, 8);
2912 /* The NOP instruction will be patched with an unconditional branch. */
2913 if (offset_p)
2914 *offset_p = (p - buf) * 4;
2915 if (size_p)
2916 *size_p = 4;
2917 p += emit_nop (p);
2918
2919 emit_ops_insns (buf, p - buf);
2920}
2921
2922/* Implementation of emit_ops method "emit_gt_goto". */
2923
2924static void
2925aarch64_emit_gt_goto (int *offset_p, int *size_p)
2926{
2927 uint32_t buf[16];
2928 uint32_t *p = buf;
2929
2930 p += emit_pop (p, x1);
2931 p += emit_cmp (p, x1, register_operand (x0));
2932 /* Branch over the next instruction if x0 <= x1. */
2933 p += emit_bcond (p, LE, 8);
2934 /* The NOP instruction will be patched with an unconditional branch. */
2935 if (offset_p)
2936 *offset_p = (p - buf) * 4;
2937 if (size_p)
2938 *size_p = 4;
2939 p += emit_nop (p);
2940
2941 emit_ops_insns (buf, p - buf);
2942}
2943
/* Implementation of emit_ops method "emit_ge_got".  (The name follows
   the emit_ops field spelling; "emit_ge_goto" is presumably meant.)  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
2965
/* Table of emit_ops callbacks used to compile agent-expression
   bytecodes into native AArch64 code.  Entries are positional and
   must match the field order of struct emit_ops.  */

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};
3006
/* Implementation of linux_target_ops method "emit_ops".

   Returns the statically allocated table of callbacks used to compile
   agent expressions to native code.  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
3014
bb903df0
PL
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* Every A64 instruction is 4 bytes, so a single instruction is
     always enough room for the jump-pad branch.  */
  return 4;
}
3023
d1d0aea1
PL
/* Implementation of linux_target_ops method "supports_range_stepping".

   Nonzero: this target accepts range-stepping (vCont;r) requests.  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3031
dd373349
AT
3032/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3033
3034static const gdb_byte *
3035aarch64_sw_breakpoint_from_kind (int kind, int *size)
3036{
17b1509a
YQ
3037 if (is_64bit_tdesc ())
3038 {
3039 *size = aarch64_breakpoint_len;
3040 return aarch64_breakpoint;
3041 }
3042 else
3043 return arm_sw_breakpoint_from_kind (kind, size);
3044}
3045
3046/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3047
3048static int
3049aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3050{
3051 if (is_64bit_tdesc ())
3052 return aarch64_breakpoint_len;
3053 else
3054 return arm_breakpoint_kind_from_pc (pcptr);
3055}
3056
3057/* Implementation of the linux_target_ops method
3058 "breakpoint_kind_from_current_state". */
3059
3060static int
3061aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3062{
3063 if (is_64bit_tdesc ())
3064 return aarch64_breakpoint_len;
3065 else
3066 return arm_breakpoint_kind_from_current_state (pcptr);
dd373349
AT
3067}
3068
7d00775e
AT
/* Support for hardware single step.

   Nonzero: hardware single step is always available on this target.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3076
176eb98c
MS
/* AArch64 GNU/Linux implementation of the linux_target_ops interface.
   Entries are positional and must match the field order of
   struct linux_target_ops; NULL selects the generic linux-low
   default for that hook.  */

struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
3aee8918
PA
3116
void
initialize_low_arch (void)
{
  /* Initialize the AArch32 (32-bit compatibility) support first;
     AArch64 GNU/Linux can run 32-bit inferiors.  */
  initialize_low_arch_aarch32 ();

  /* Register the base and SVE regset tables.  */
  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);

#if GDB_SELF_TEST
  /* Target-description self tests are only built when GDB_SELF_TEST
     is enabled.  */
  initialize_low_tdesc ();
#endif
}
This page took 0.72959 seconds and 4 git commands to generate.