gdbserver/linux-low: turn 'get_pc' and 'set_pc' into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-aarch64-low.cc
CommitLineData
176eb98c
MS
1/* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
b811d2c2 4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
176eb98c
MS
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "server.h"
23#include "linux-low.h"
db3cb7cb 24#include "nat/aarch64-linux.h"
554717a3 25#include "nat/aarch64-linux-hw-point.h"
bb903df0 26#include "arch/aarch64-insn.h"
3b53ae99 27#include "linux-aarch32-low.h"
176eb98c 28#include "elf/common.h"
afbe19f8
PL
29#include "ax.h"
30#include "tracepoint.h"
f9d949fb 31#include "debug.h"
176eb98c
MS
32
33#include <signal.h>
34#include <sys/user.h>
5826e159 35#include "nat/gdb_ptrace.h"
e9dae05e 36#include <asm/ptrace.h>
bb903df0
PL
37#include <inttypes.h>
38#include <endian.h>
39#include <sys/uio.h>
176eb98c
MS
40
41#include "gdb_proc_service.h"
cc628f3d 42#include "arch/aarch64.h"
7cc17433 43#include "linux-aarch32-tdesc.h"
d6d7ce56 44#include "linux-aarch64-tdesc.h"
fefa175e 45#include "nat/aarch64-sve-linux-ptrace.h"
02895270 46#include "tdesc.h"
176eb98c 47
176eb98c
MS
48#ifdef HAVE_SYS_REG_H
49#include <sys/reg.h>
50#endif
51
ef0478f6
TBA
52/* Linux target op definitions for the AArch64 architecture. */
53
54class aarch64_target : public linux_process_target
55{
56public:
57
aa8d21c9
TBA
58 const regs_info *get_regs_info () override;
59
797bcff5
TBA
60protected:
61
62 void low_arch_setup () override;
daca57a7
TBA
63
64 bool low_cannot_fetch_register (int regno) override;
65
66 bool low_cannot_store_register (int regno) override;
bf9ae9d8
TBA
67
68 bool low_supports_breakpoints () override;
69
70 CORE_ADDR low_get_pc (regcache *regcache) override;
71
72 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
ef0478f6
TBA
73};
74
75/* The singleton target ops object. */
76
77static aarch64_target the_aarch64_target;
78
daca57a7
TBA
79bool
80aarch64_target::low_cannot_fetch_register (int regno)
81{
82 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
83 "is not implemented by the target");
84}
85
86bool
87aarch64_target::low_cannot_store_register (int regno)
88{
89 gdb_assert_not_reached ("linux target op low_cannot_store_register "
90 "is not implemented by the target");
91}
92
176eb98c
MS
93/* Per-process arch-specific data we want to keep. */
94
95struct arch_process_info
96{
97 /* Hardware breakpoint/watchpoint data.
98 The reason for them to be per-process rather than per-thread is
99 due to the lack of information in the gdbserver environment;
100 gdbserver is not told that whether a requested hardware
101 breakpoint/watchpoint is thread specific or not, so it has to set
102 each hw bp/wp for every thread in the current process. The
103 higher level bp/wp management in gdb will resume a thread if a hw
104 bp/wp trap is not expected for it. Since the hw bp/wp setting is
105 same for each thread, it is reasonable for the data to live here.
106 */
107 struct aarch64_debug_reg_state debug_reg_state;
108};
109
3b53ae99
YQ
110/* Return true if the size of register 0 is 8 byte. */
111
112static int
113is_64bit_tdesc (void)
114{
115 struct regcache *regcache = get_thread_regcache (current_thread, 0);
116
117 return register_size (regcache->tdesc, 0) == 8;
118}
119
02895270
AH
120/* Return true if the regcache contains the number of SVE registers. */
121
122static bool
123is_sve_tdesc (void)
124{
125 struct regcache *regcache = get_thread_regcache (current_thread, 0);
126
6cdd651f 127 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
02895270
AH
128}
129
176eb98c
MS
130static void
131aarch64_fill_gregset (struct regcache *regcache, void *buf)
132{
6a69a054 133 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
176eb98c
MS
134 int i;
135
136 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
cc628f3d
AH
137 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
138 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
139 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
140 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
176eb98c
MS
141}
142
143static void
144aarch64_store_gregset (struct regcache *regcache, const void *buf)
145{
6a69a054 146 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
176eb98c
MS
147 int i;
148
149 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
cc628f3d
AH
150 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
151 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
152 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
153 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
176eb98c
MS
154}
155
156static void
157aarch64_fill_fpregset (struct regcache *regcache, void *buf)
158{
9caa3311 159 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
176eb98c
MS
160 int i;
161
162 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
cc628f3d
AH
163 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
164 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
165 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
176eb98c
MS
166}
167
168static void
169aarch64_store_fpregset (struct regcache *regcache, const void *buf)
170{
9caa3311
YQ
171 const struct user_fpsimd_state *regset
172 = (const struct user_fpsimd_state *) buf;
176eb98c
MS
173 int i;
174
175 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
cc628f3d
AH
176 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
177 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
178 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
176eb98c
MS
179}
180
1ef53e6b
AH
181/* Store the pauth registers to regcache. */
182
183static void
184aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
185{
186 uint64_t *pauth_regset = (uint64_t *) buf;
187 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
188
189 if (pauth_base == 0)
190 return;
191
192 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
193 &pauth_regset[0]);
194 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
195 &pauth_regset[1]);
196}
197
bf9ae9d8
TBA
198bool
199aarch64_target::low_supports_breakpoints ()
200{
201 return true;
202}
203
204/* Implementation of linux target ops method "low_get_pc". */
421530db 205
bf9ae9d8
TBA
206CORE_ADDR
207aarch64_target::low_get_pc (regcache *regcache)
176eb98c 208{
8a7e4587 209 if (register_size (regcache->tdesc, 0) == 8)
a5652c21 210 return linux_get_pc_64bit (regcache);
8a7e4587 211 else
a5652c21 212 return linux_get_pc_32bit (regcache);
176eb98c
MS
213}
214
bf9ae9d8 215/* Implementation of linux target ops method "low_set_pc". */
421530db 216
bf9ae9d8
TBA
217void
218aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
176eb98c 219{
8a7e4587 220 if (register_size (regcache->tdesc, 0) == 8)
a5652c21 221 linux_set_pc_64bit (regcache, pc);
8a7e4587 222 else
a5652c21 223 linux_set_pc_32bit (regcache, pc);
176eb98c
MS
224}
225
176eb98c
MS
226#define aarch64_breakpoint_len 4
227
37d66942
PL
228/* AArch64 BRK software debug mode instruction.
229 This instruction needs to match gdb/aarch64-tdep.c
230 (aarch64_default_breakpoint). */
231static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
176eb98c 232
421530db
PL
233/* Implementation of linux_target_ops method "breakpoint_at". */
234
176eb98c
MS
235static int
236aarch64_breakpoint_at (CORE_ADDR where)
237{
db91f502
YQ
238 if (is_64bit_tdesc ())
239 {
240 gdb_byte insn[aarch64_breakpoint_len];
176eb98c 241
52405d85
TBA
242 the_target->read_memory (where, (unsigned char *) &insn,
243 aarch64_breakpoint_len);
db91f502
YQ
244 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
245 return 1;
176eb98c 246
db91f502
YQ
247 return 0;
248 }
249 else
250 return arm_breakpoint_at (where);
176eb98c
MS
251}
252
176eb98c
MS
253static void
254aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
255{
256 int i;
257
258 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
259 {
260 state->dr_addr_bp[i] = 0;
261 state->dr_ctrl_bp[i] = 0;
262 state->dr_ref_count_bp[i] = 0;
263 }
264
265 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
266 {
267 state->dr_addr_wp[i] = 0;
268 state->dr_ctrl_wp[i] = 0;
269 state->dr_ref_count_wp[i] = 0;
270 }
271}
272
176eb98c
MS
273/* Return the pointer to the debug register state structure in the
274 current process' arch-specific data area. */
275
db3cb7cb 276struct aarch64_debug_reg_state *
88e2cf7e 277aarch64_get_debug_reg_state (pid_t pid)
176eb98c 278{
88e2cf7e 279 struct process_info *proc = find_process_pid (pid);
176eb98c 280
fe978cb0 281 return &proc->priv->arch_private->debug_reg_state;
176eb98c
MS
282}
283
421530db
PL
284/* Implementation of linux_target_ops method "supports_z_point_type". */
285
4ff0d3d8
PA
286static int
287aarch64_supports_z_point_type (char z_type)
288{
289 switch (z_type)
290 {
96c97461 291 case Z_PACKET_SW_BP:
4ff0d3d8
PA
292 case Z_PACKET_HW_BP:
293 case Z_PACKET_WRITE_WP:
294 case Z_PACKET_READ_WP:
295 case Z_PACKET_ACCESS_WP:
296 return 1;
297 default:
4ff0d3d8
PA
298 return 0;
299 }
300}
301
421530db 302/* Implementation of linux_target_ops method "insert_point".
176eb98c 303
421530db
PL
304 It actually only records the info of the to-be-inserted bp/wp;
305 the actual insertion will happen when threads are resumed. */
176eb98c
MS
306
307static int
802e8e6d
PA
308aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
309 int len, struct raw_breakpoint *bp)
176eb98c
MS
310{
311 int ret;
4ff0d3d8 312 enum target_hw_bp_type targ_type;
88e2cf7e
YQ
313 struct aarch64_debug_reg_state *state
314 = aarch64_get_debug_reg_state (pid_of (current_thread));
4ff0d3d8 315
c5e92cca 316 if (show_debug_regs)
176eb98c
MS
317 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
318 (unsigned long) addr, len);
319
802e8e6d
PA
320 /* Determine the type from the raw breakpoint type. */
321 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
176eb98c
MS
322
323 if (targ_type != hw_execute)
39edd165
YQ
324 {
325 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
326 ret = aarch64_handle_watchpoint (targ_type, addr, len,
327 1 /* is_insert */, state);
328 else
329 ret = -1;
330 }
176eb98c 331 else
8d689ee5
YQ
332 {
333 if (len == 3)
334 {
335 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
336 instruction. Set it to 2 to correctly encode length bit
337 mask in hardware/watchpoint control register. */
338 len = 2;
339 }
340 ret = aarch64_handle_breakpoint (targ_type, addr, len,
341 1 /* is_insert */, state);
342 }
176eb98c 343
60a191ed 344 if (show_debug_regs)
88e2cf7e
YQ
345 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
346 targ_type);
176eb98c
MS
347
348 return ret;
349}
350
421530db 351/* Implementation of linux_target_ops method "remove_point".
176eb98c 352
421530db
PL
353 It actually only records the info of the to-be-removed bp/wp,
354 the actual removal will be done when threads are resumed. */
176eb98c
MS
355
356static int
802e8e6d
PA
357aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
358 int len, struct raw_breakpoint *bp)
176eb98c
MS
359{
360 int ret;
4ff0d3d8 361 enum target_hw_bp_type targ_type;
88e2cf7e
YQ
362 struct aarch64_debug_reg_state *state
363 = aarch64_get_debug_reg_state (pid_of (current_thread));
4ff0d3d8 364
c5e92cca 365 if (show_debug_regs)
176eb98c
MS
366 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
367 (unsigned long) addr, len);
368
802e8e6d
PA
369 /* Determine the type from the raw breakpoint type. */
370 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
176eb98c
MS
371
372 /* Set up state pointers. */
373 if (targ_type != hw_execute)
374 ret =
c67ca4de
YQ
375 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
376 state);
176eb98c 377 else
8d689ee5
YQ
378 {
379 if (len == 3)
380 {
381 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
382 instruction. Set it to 2 to correctly encode length bit
383 mask in hardware/watchpoint control register. */
384 len = 2;
385 }
386 ret = aarch64_handle_breakpoint (targ_type, addr, len,
387 0 /* is_insert */, state);
388 }
176eb98c 389
60a191ed 390 if (show_debug_regs)
88e2cf7e
YQ
391 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
392 targ_type);
176eb98c
MS
393
394 return ret;
395}
396
421530db 397/* Implementation of linux_target_ops method "stopped_data_address". */
176eb98c
MS
398
399static CORE_ADDR
400aarch64_stopped_data_address (void)
401{
402 siginfo_t siginfo;
403 int pid, i;
404 struct aarch64_debug_reg_state *state;
405
0bfdf32f 406 pid = lwpid_of (current_thread);
176eb98c
MS
407
408 /* Get the siginfo. */
409 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
410 return (CORE_ADDR) 0;
411
412 /* Need to be a hardware breakpoint/watchpoint trap. */
413 if (siginfo.si_signo != SIGTRAP
414 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
415 return (CORE_ADDR) 0;
416
417 /* Check if the address matches any watched address. */
88e2cf7e 418 state = aarch64_get_debug_reg_state (pid_of (current_thread));
176eb98c
MS
419 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
420 {
a3b60e45
JK
421 const unsigned int offset
422 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
176eb98c
MS
423 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
424 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
a3b60e45
JK
425 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
426 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
427 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
428
176eb98c
MS
429 if (state->dr_ref_count_wp[i]
430 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
a3b60e45 431 && addr_trap >= addr_watch_aligned
176eb98c 432 && addr_trap < addr_watch + len)
a3b60e45
JK
433 {
434 /* ADDR_TRAP reports the first address of the memory range
435 accessed by the CPU, regardless of what was the memory
436 range watched. Thus, a large CPU access that straddles
437 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
438 ADDR_TRAP that is lower than the
439 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
440
441 addr: | 4 | 5 | 6 | 7 | 8 |
442 |---- range watched ----|
443 |----------- range accessed ------------|
444
445 In this case, ADDR_TRAP will be 4.
446
447 To match a watchpoint known to GDB core, we must never
448 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
449 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
450 positive on kernels older than 4.10. See PR
451 external/20207. */
452 return addr_orig;
453 }
176eb98c
MS
454 }
455
456 return (CORE_ADDR) 0;
457}
458
421530db 459/* Implementation of linux_target_ops method "stopped_by_watchpoint". */
176eb98c
MS
460
461static int
462aarch64_stopped_by_watchpoint (void)
463{
464 if (aarch64_stopped_data_address () != 0)
465 return 1;
466 else
467 return 0;
468}
469
470/* Fetch the thread-local storage pointer for libthread_db. */
471
472ps_err_e
754653a7 473ps_get_thread_area (struct ps_prochandle *ph,
176eb98c
MS
474 lwpid_t lwpid, int idx, void **base)
475{
a0cc84cd
YQ
476 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
477 is_64bit_tdesc ());
176eb98c
MS
478}
479
ade90bde
YQ
480/* Implementation of linux_target_ops method "siginfo_fixup". */
481
482static int
8adce034 483aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
ade90bde
YQ
484{
485 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
486 if (!is_64bit_tdesc ())
487 {
488 if (direction == 0)
489 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
490 native);
491 else
492 aarch64_siginfo_from_compat_siginfo (native,
493 (struct compat_siginfo *) inf);
494
495 return 1;
496 }
497
498 return 0;
499}
500
04ec7890 501/* Implementation of linux_target_ops method "new_process". */
176eb98c
MS
502
503static struct arch_process_info *
504aarch64_linux_new_process (void)
505{
8d749320 506 struct arch_process_info *info = XCNEW (struct arch_process_info);
176eb98c
MS
507
508 aarch64_init_debug_reg_state (&info->debug_reg_state);
509
510 return info;
511}
512
04ec7890
SM
513/* Implementation of linux_target_ops method "delete_process". */
514
515static void
516aarch64_linux_delete_process (struct arch_process_info *info)
517{
518 xfree (info);
519}
520
421530db
PL
521/* Implementation of linux_target_ops method "linux_new_fork". */
522
3a8a0396
DB
523static void
524aarch64_linux_new_fork (struct process_info *parent,
525 struct process_info *child)
526{
527 /* These are allocated by linux_add_process. */
61a7418c
DB
528 gdb_assert (parent->priv != NULL
529 && parent->priv->arch_private != NULL);
530 gdb_assert (child->priv != NULL
531 && child->priv->arch_private != NULL);
3a8a0396
DB
532
533 /* Linux kernel before 2.6.33 commit
534 72f674d203cd230426437cdcf7dd6f681dad8b0d
535 will inherit hardware debug registers from parent
536 on fork/vfork/clone. Newer Linux kernels create such tasks with
537 zeroed debug registers.
538
539 GDB core assumes the child inherits the watchpoints/hw
540 breakpoints of the parent, and will remove them all from the
541 forked off process. Copy the debug registers mirrors into the
542 new process so that all breakpoints and watchpoints can be
543 removed together. The debug registers mirror will become zeroed
544 in the end before detaching the forked off process, thus making
545 this compatible with older Linux kernels too. */
546
61a7418c 547 *child->priv->arch_private = *parent->priv->arch_private;
3a8a0396
DB
548}
549
ee4fbcfa
AH
550/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
551#define AARCH64_HWCAP_PACA (1 << 30)
552
797bcff5 553/* Implementation of linux target ops method "low_arch_setup". */
3b53ae99 554
797bcff5
TBA
555void
556aarch64_target::low_arch_setup ()
3b53ae99
YQ
557{
558 unsigned int machine;
559 int is_elf64;
560 int tid;
561
562 tid = lwpid_of (current_thread);
563
564 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
565
566 if (is_elf64)
fefa175e
AH
567 {
568 uint64_t vq = aarch64_sve_get_vq (tid);
974c89e0
AH
569 unsigned long hwcap = linux_get_hwcap (8);
570 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
ee4fbcfa
AH
571
572 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
fefa175e 573 }
3b53ae99 574 else
7cc17433 575 current_process ()->tdesc = aarch32_linux_read_description ();
176eb98c 576
af1b22f3 577 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
176eb98c
MS
578}
579
02895270
AH
580/* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
581
582static void
583aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
584{
585 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
586}
587
588/* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
589
590static void
591aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
592{
593 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
594}
595
3aee8918 596static struct regset_info aarch64_regsets[] =
176eb98c
MS
597{
598 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
599 sizeof (struct user_pt_regs), GENERAL_REGS,
600 aarch64_fill_gregset, aarch64_store_gregset },
601 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
602 sizeof (struct user_fpsimd_state), FP_REGS,
603 aarch64_fill_fpregset, aarch64_store_fpregset
604 },
1ef53e6b
AH
605 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
606 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
607 NULL, aarch64_store_pauthregset },
50bc912a 608 NULL_REGSET
176eb98c
MS
609};
610
3aee8918
PA
611static struct regsets_info aarch64_regsets_info =
612 {
613 aarch64_regsets, /* regsets */
614 0, /* num_regsets */
615 NULL, /* disabled_regsets */
616 };
617
3b53ae99 618static struct regs_info regs_info_aarch64 =
3aee8918
PA
619 {
620 NULL, /* regset_bitmap */
c2d65f38 621 NULL, /* usrregs */
3aee8918
PA
622 &aarch64_regsets_info,
623 };
624
02895270
AH
625static struct regset_info aarch64_sve_regsets[] =
626{
627 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
628 sizeof (struct user_pt_regs), GENERAL_REGS,
629 aarch64_fill_gregset, aarch64_store_gregset },
630 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
631 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
632 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
633 },
1ef53e6b
AH
634 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
635 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
636 NULL, aarch64_store_pauthregset },
02895270
AH
637 NULL_REGSET
638};
639
640static struct regsets_info aarch64_sve_regsets_info =
641 {
642 aarch64_sve_regsets, /* regsets. */
643 0, /* num_regsets. */
644 NULL, /* disabled_regsets. */
645 };
646
647static struct regs_info regs_info_aarch64_sve =
648 {
649 NULL, /* regset_bitmap. */
650 NULL, /* usrregs. */
651 &aarch64_sve_regsets_info,
652 };
653
aa8d21c9 654/* Implementation of linux target ops method "get_regs_info". */
421530db 655
aa8d21c9
TBA
656const regs_info *
657aarch64_target::get_regs_info ()
3aee8918 658{
02895270 659 if (!is_64bit_tdesc ())
3b53ae99 660 return &regs_info_aarch32;
02895270
AH
661
662 if (is_sve_tdesc ())
663 return &regs_info_aarch64_sve;
664
665 return &regs_info_aarch64;
3aee8918
PA
666}
667
7671bf47
PL
668/* Implementation of linux_target_ops method "supports_tracepoints". */
669
670static int
671aarch64_supports_tracepoints (void)
672{
524b57e6
YQ
673 if (current_thread == NULL)
674 return 1;
675 else
676 {
677 /* We don't support tracepoints on aarch32 now. */
678 return is_64bit_tdesc ();
679 }
7671bf47
PL
680}
681
bb903df0
PL
682/* Implementation of linux_target_ops method "get_thread_area". */
683
684static int
685aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
686{
687 struct iovec iovec;
688 uint64_t reg;
689
690 iovec.iov_base = &reg;
691 iovec.iov_len = sizeof (reg);
692
693 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
694 return -1;
695
696 *addrp = reg;
697
698 return 0;
699}
700
061fc021
YQ
701/* Implementation of linux_target_ops method "get_syscall_trapinfo". */
702
703static void
704aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
705{
706 int use_64bit = register_size (regcache->tdesc, 0) == 8;
707
708 if (use_64bit)
709 {
710 long l_sysno;
711
712 collect_register_by_name (regcache, "x8", &l_sysno);
713 *sysno = (int) l_sysno;
714 }
715 else
716 collect_register_by_name (regcache, "r7", sysno);
717}
718
afbe19f8
PL
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};
731
6c1c9a8b
YQ
/* The kinds of operand the emit helpers understand.  */

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};
737
bb903df0
PL
738/* Representation of an operand. At this time, it only supports register
739 and immediate types. */
740
741struct aarch64_operand
742{
743 /* Type of the operand. */
6c1c9a8b
YQ
744 enum aarch64_operand_type type;
745
bb903df0
PL
746 /* Value of the operand according to the type. */
747 union
748 {
749 uint32_t imm;
750 struct aarch64_register reg;
751 };
752};
753
754/* List of registers that we are currently using, we can add more here as
755 we need to use them. */
756
757/* General purpose scratch registers (64 bit). */
758static const struct aarch64_register x0 = { 0, 1 };
759static const struct aarch64_register x1 = { 1, 1 };
760static const struct aarch64_register x2 = { 2, 1 };
761static const struct aarch64_register x3 = { 3, 1 };
762static const struct aarch64_register x4 = { 4, 1 };
763
764/* General purpose scratch registers (32 bit). */
afbe19f8 765static const struct aarch64_register w0 = { 0, 0 };
bb903df0
PL
766static const struct aarch64_register w2 = { 2, 0 };
767
768/* Intra-procedure scratch registers. */
769static const struct aarch64_register ip0 = { 16, 1 };
770
771/* Special purpose registers. */
afbe19f8
PL
772static const struct aarch64_register fp = { 29, 1 };
773static const struct aarch64_register lr = { 30, 1 };
bb903df0
PL
774static const struct aarch64_register sp = { 31, 1 };
775static const struct aarch64_register xzr = { 31, 1 };
776
777/* Dynamically allocate a new register. If we know the register
778 statically, we should make it a global as above instead of using this
779 helper function. */
780
781static struct aarch64_register
782aarch64_register (unsigned num, int is64)
783{
784 return (struct aarch64_register) { num, is64 };
785}
786
787/* Helper function to create a register operand, for instructions with
788 different types of operands.
789
790 For example:
791 p += emit_mov (p, x0, register_operand (x1)); */
792
793static struct aarch64_operand
794register_operand (struct aarch64_register reg)
795{
796 struct aarch64_operand operand;
797
798 operand.type = OPERAND_REGISTER;
799 operand.reg = reg;
800
801 return operand;
802}
803
804/* Helper function to create an immediate operand, for instructions with
805 different types of operands.
806
807 For example:
808 p += emit_mov (p, x0, immediate_operand (12)); */
809
810static struct aarch64_operand
811immediate_operand (uint32_t imm)
812{
813 struct aarch64_operand operand;
814
815 operand.type = OPERAND_IMMEDIATE;
816 operand.imm = imm;
817
818 return operand;
819}
820
bb903df0
PL
821/* Helper function to create an offset memory operand.
822
823 For example:
824 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
825
826static struct aarch64_memory_operand
827offset_memory_operand (int32_t offset)
828{
829 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
830}
831
832/* Helper function to create a pre-index memory operand.
833
834 For example:
835 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
836
837static struct aarch64_memory_operand
838preindex_memory_operand (int32_t index)
839{
840 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
841}
842
afbe19f8
PL
843/* Helper function to create a post-index memory operand.
844
845 For example:
846 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
847
848static struct aarch64_memory_operand
849postindex_memory_operand (int32_t index)
850{
851 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
852}
853
bb903df0
PL
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2 */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
871
bb903df0
PL
872/* Write a BLR instruction into *BUF.
873
874 BLR rn
875
876 RN is the register to branch to. */
877
878static int
879emit_blr (uint32_t *buf, struct aarch64_register rn)
880{
e1c587c3 881 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
bb903df0
PL
882}
883
afbe19f8 884/* Write a RET instruction into *BUF.
bb903df0 885
afbe19f8 886 RET xn
bb903df0 887
afbe19f8 888 RN is the register to branch to. */
bb903df0
PL
889
890static int
afbe19f8
PL
891emit_ret (uint32_t *buf, struct aarch64_register rn)
892{
e1c587c3 893 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
afbe19f8
PL
894}
895
896static int
897emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
898 struct aarch64_register rt,
899 struct aarch64_register rt2,
900 struct aarch64_register rn,
901 struct aarch64_memory_operand operand)
bb903df0
PL
902{
903 uint32_t opc;
904 uint32_t pre_index;
905 uint32_t write_back;
906
907 if (rt.is64)
908 opc = ENCODE (2, 2, 30);
909 else
910 opc = ENCODE (0, 2, 30);
911
912 switch (operand.type)
913 {
914 case MEMORY_OPERAND_OFFSET:
915 {
916 pre_index = ENCODE (1, 1, 24);
917 write_back = ENCODE (0, 1, 23);
918 break;
919 }
afbe19f8
PL
920 case MEMORY_OPERAND_POSTINDEX:
921 {
922 pre_index = ENCODE (0, 1, 24);
923 write_back = ENCODE (1, 1, 23);
924 break;
925 }
bb903df0
PL
926 case MEMORY_OPERAND_PREINDEX:
927 {
928 pre_index = ENCODE (1, 1, 24);
929 write_back = ENCODE (1, 1, 23);
930 break;
931 }
932 default:
933 return 0;
934 }
935
e1c587c3
YQ
936 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
937 | ENCODE (operand.index >> 3, 7, 15)
938 | ENCODE (rt2.num, 5, 10)
939 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
bb903df0
PL
940}
941
afbe19f8
PL
942/* Write a STP instruction into *BUF.
943
944 STP rt, rt2, [rn, #offset]
945 STP rt, rt2, [rn, #index]!
946 STP rt, rt2, [rn], #index
947
948 RT and RT2 are the registers to store.
949 RN is the base address register.
950 OFFSET is the immediate to add to the base address. It is limited to a
951 -512 .. 504 range (7 bits << 3). */
952
953static int
954emit_stp (uint32_t *buf, struct aarch64_register rt,
955 struct aarch64_register rt2, struct aarch64_register rn,
956 struct aarch64_memory_operand operand)
957{
958 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
959}
960
961/* Write a LDP instruction into *BUF.
962
963 LDP rt, rt2, [rn, #offset]
964 LDP rt, rt2, [rn, #index]!
965 LDP rt, rt2, [rn], #index
966
967 RT and RT2 are the registers to store.
968 RN is the base address register.
969 OFFSET is the immediate to add to the base address. It is limited to a
970 -512 .. 504 range (7 bits << 3). */
971
972static int
973emit_ldp (uint32_t *buf, struct aarch64_register rt,
974 struct aarch64_register rt2, struct aarch64_register rn,
975 struct aarch64_memory_operand operand)
976{
977 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
978}
979
bb903df0
PL
980/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
981
982 LDP qt, qt2, [rn, #offset]
983
984 RT and RT2 are the Q registers to store.
985 RN is the base address register.
986 OFFSET is the immediate to add to the base address. It is limited to
987 -1024 .. 1008 range (7 bits << 4). */
988
989static int
990emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
991 struct aarch64_register rn, int32_t offset)
992{
993 uint32_t opc = ENCODE (2, 2, 30);
994 uint32_t pre_index = ENCODE (1, 1, 24);
995
e1c587c3
YQ
996 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
997 | ENCODE (offset >> 4, 7, 15)
998 | ENCODE (rt2, 5, 10)
999 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
bb903df0
PL
1000}
1001
/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  /* opc == 2 selects the 128-bit (Q register) variant.  */
  uint32_t opc = ENCODE (2, 2, 30);
  /* Signed-offset (no writeback) addressing form.  */
  uint32_t pre_index = ENCODE (1, 1, 24);

  /* The 7-bit immediate field holds the offset scaled down by 16.  */
  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
1023
afbe19f8
PL
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  For the unsigned
   offset form it is scaled by the 2-byte access size, giving a
   0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  /* Size field 1 selects the halfword access.  */
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
1042
/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  For the unsigned
   offset form the byte access size means no scaling is applied, giving a
   0 .. 4095 range (12 bits).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  /* Size field 0 selects the byte access.  */
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
1061
bb903df0 1062
bb903df0
PL
1063
/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  For the unsigned
   offset form it is scaled by the access size: 0 .. 32760 (12 bits << 3)
   for a 64-bit RT, 0 .. 16380 (12 bits << 2) for a 32-bit RT.  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  /* Size field: 3 for a 64-bit store, 2 for a 32-bit store.  */
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
1082
/* Helper function emitting an exclusive load or store instruction.

   SIZE is the access size field (2 for 32-bit, 3 for 64-bit).
   OPCODE is one of the exclusive load/store opcodes (LDAXR, STXR, STLR...).
   RS is the status register (stores only; xzr when unused).
   RT is the data register.
   RT2 is the second data register (pair forms; xzr when unused).
   RN is the base address register.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
1097
/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
1112
/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the status register; it indicates if the store succeeded or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
1128
/* Write a STLR (store-release) instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
1143
/* Helper function for data processing instructions with register sources.

   OPCODE is the fully-formed opcode except for the size and register
   number fields.  RD is the destination register; RN and RM are the
   source registers.  The operation width is taken from RD.is64.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  /* Bit 31 selects the 64-bit variant.  */
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
1157
1158/* Helper function for data processing instructions taking either a register
1159 or an immediate. */
1160
1161static int
1162emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1163 struct aarch64_register rd,
1164 struct aarch64_register rn,
1165 struct aarch64_operand operand)
1166{
1167 uint32_t size = ENCODE (rd.is64, 1, 31);
1168 /* The opcode is different for register and immediate source operands. */
1169 uint32_t operand_opcode;
1170
1171 if (operand.type == OPERAND_IMMEDIATE)
1172 {
1173 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1174 operand_opcode = ENCODE (8, 4, 25);
1175
e1c587c3
YQ
1176 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1177 | ENCODE (operand.imm, 12, 10)
1178 | ENCODE (rn.num, 5, 5)
1179 | ENCODE (rd.num, 5, 0));
bb903df0
PL
1180 }
1181 else
1182 {
1183 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1184 operand_opcode = ENCODE (5, 4, 25);
1185
1186 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1187 rn, operand.reg);
1188 }
1189}
1190
/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
1209
/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register subtract.

   RD is the destination register.
   RN is the input register.
   OPERAND is the value to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
1227
1228/* Write a MOV instruction into *BUF.
1229
1230 MOV rd, #imm
1231 MOV rd, rm
1232
1233 This function handles both a wide immediate move and a register move,
1234 with the condition that the source register is not xzr. xzr and the
1235 stack pointer share the same encoding and this function only supports
1236 the stack pointer.
1237
1238 RD is the destination register.
1239 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1240 OPERAND_REGISTER. */
1241
1242static int
1243emit_mov (uint32_t *buf, struct aarch64_register rd,
1244 struct aarch64_operand operand)
1245{
1246 if (operand.type == OPERAND_IMMEDIATE)
1247 {
1248 uint32_t size = ENCODE (rd.is64, 1, 31);
1249 /* Do not shift the immediate. */
1250 uint32_t shift = ENCODE (0, 2, 21);
1251
e1c587c3
YQ
1252 return aarch64_emit_insn (buf, MOV | size | shift
1253 | ENCODE (operand.imm, 16, 5)
1254 | ENCODE (rd.num, 5, 0));
bb903df0
PL
1255 }
1256 else
1257 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1258}
1259
/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #(shift * 16)

   RD is the destination register.
   IMM is the 16-bit immediate.
   SHIFT is the logical shift left to apply to IMM, expressed in 16-bit
   units (the hw field): 0, 1, 2 or 3 for lsl #0, #16, #32, #48.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
1277
1278/* Write instructions into *BUF in order to move ADDR into a register.
1279 ADDR can be a 64-bit value.
1280
1281 This function will emit a series of MOV and MOVK instructions, such as:
1282
1283 MOV xd, #(addr)
1284 MOVK xd, #(addr >> 16), lsl #16
1285 MOVK xd, #(addr >> 32), lsl #32
1286 MOVK xd, #(addr >> 48), lsl #48 */
1287
1288static int
1289emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1290{
1291 uint32_t *p = buf;
1292
1293 /* The MOV (wide immediate) instruction clears to top bits of the
1294 register. */
1295 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1296
1297 if ((addr >> 16) != 0)
1298 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1299 else
1300 return p - buf;
1301
1302 if ((addr >> 32) != 0)
1303 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1304 else
1305 return p - buf;
1306
1307 if ((addr >> 48) != 0)
1308 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1309
1310 return p - buf;
1311}
1312
afbe19f8
PL
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and OPERAND are the source operands.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
1328
/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm; only the condition
   flags are affected.

   RN and OPERAND are the values to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
1343
/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
1357
/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
1371
/* Write an ORN (or-not) instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
1385
/* Write an EOR (exclusive-or) instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
1399
/* Write a MVN (bitwise-not) instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
1415
/* Write a LSLV (logical shift left, variable) instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN is the value to shift; RM holds the shift amount.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
1429
/* Write a LSRV (logical shift right, variable) instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN is the value to shift; RM holds the shift amount.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
1443
/* Write an ASRV (arithmetic shift right, variable) instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN is the value to shift; RM holds the shift amount.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
1457
/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
1471
bb903df0
PL
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
1486
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
1501
/* Write a SEVL (send event local) instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event
   local to this PE, making a subsequent WFE on the same PE return
   immediately.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
1511
/* Write a WFE (wait for event) instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
1521
afbe19f8
PL
/* Write a SBFM (signed bitfield move) instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N bit must match the register width.  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
1545
/* Write a SBFX (signed bitfield extract) instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

   SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
1566
/* Write a UBFM (unsigned bitfield move) instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N bit must match the register width.  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
1590
/* Write a UBFX (unsigned bitfield extract) instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

   UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
1611
/* Write a CSINC (conditional select increment) instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally selects rn or rm + 1 and places the
   result in rd.  RN is chosen if the condition is true, otherwise
   RM + 1 is used.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
1634
/* Write a CSET (conditional set) instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

   CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
1657
bb903df0
PL
/* Write LEN instructions from BUF into the inferior memory at *TO,
   then advance *TO past the bytes written.

   Note instructions are always little endian on AArch64, unlike data,
   so on a big-endian host each word is byte-swapped first.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  /* Big-endian host: convert each instruction word to little endian
     in a temporary buffer before writing it out.  */
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  /* Little-endian host: the buffer is already in target byte order.  */
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
1682
0badd99f
YQ
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s); advanced by each
     visitor callback as instructions are emitted.  */
  uint32_t *insn_ptr;
};
1697
/* Implementation of aarch64_insn_visitor method "b".

   Re-emit a B/BL instruction with its branch offset adjusted for the
   new location.  Note that if the adjusted offset does not fit in the
   26-bit (<< 2) immediate, no instruction is emitted at all.  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
1712
/* Implementation of aarch64_insn_visitor method "b_cond".

   Re-emit a conditional branch with its offset adjusted for the new
   location, falling back to a B.cond/B trampoline when the adjusted
   offset only fits an unconditional branch.  If it fits neither form,
   nothing is emitted.  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
bb903df0 1748
0badd99f
YQ
/* Implementation of aarch64_insn_visitor method "cb".

   Re-emit a CBZ/CBNZ instruction with its offset adjusted for the new
   location, falling back to a CB(N)Z/B trampoline when the adjusted
   offset only fits an unconditional branch.  If it fits neither form,
   nothing is emitted.  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
bb903df0 1785
/* Implementation of aarch64_insn_visitor method "tb".

   Re-emit a TBZ/TBNZ instruction with its offset adjusted for the new
   location, falling back to a TB(N)Z/B trampoline when the adjusted
   offset only fits an unconditional branch.  If it fits neither form,
   nothing is emitted.  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* Offset relative to the relocated location.  */
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for a unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
bb903df0 1823
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP compute a PC-relative address; since the original PC is
   known here, materialize the final address directly with a MOV/MOVK
   sequence instead of re-encoding a PC-relative form.  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
bb903df0 1848
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A load-literal reads from a PC-relative address; since the original
   PC is known here, materialize the absolute address into RT and then
   load through it.  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
1885
1886/* Implementation of aarch64_insn_visitor method "others". */
1887
1888static void
1889aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1890 struct aarch64_insn_data *data)
1891{
1892 struct aarch64_insn_relocation_data *insn_reloc
1893 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1894
0badd99f
YQ
1895 /* The instruction is not PC relative. Just re-emit it at the new
1896 location. */
e1c587c3 1897 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
0badd99f
YQ
1898}
1899
/* Visitor callbacks used when relocating instructions for fast
   tracepoint jump pads; see aarch64_ftrace_insn_reloc_* above.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
1910
bb903df0
PL
1911/* Implementation of linux_target_ops method
1912 "install_fast_tracepoint_jump_pad". */
1913
1914static int
1915aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1916 CORE_ADDR tpaddr,
1917 CORE_ADDR collector,
1918 CORE_ADDR lockaddr,
1919 ULONGEST orig_size,
1920 CORE_ADDR *jump_entry,
1921 CORE_ADDR *trampoline,
1922 ULONGEST *trampoline_size,
1923 unsigned char *jjump_pad_insn,
1924 ULONGEST *jjump_pad_insn_size,
1925 CORE_ADDR *adjusted_insn_addr,
1926 CORE_ADDR *adjusted_insn_addr_end,
1927 char *err)
1928{
1929 uint32_t buf[256];
1930 uint32_t *p = buf;
2ac09a5b 1931 int64_t offset;
bb903df0 1932 int i;
70b439f0 1933 uint32_t insn;
bb903df0 1934 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1935 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1936
1937 /* We need to save the current state on the stack both to restore it
1938 later and to collect register values when the tracepoint is hit.
1939
1940 The saved registers are pushed in a layout that needs to be in sync
1941 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1942 the supply_fast_tracepoint_registers function will fill in the
1943 register cache from a pointer to saved registers on the stack we build
1944 here.
1945
1946 For simplicity, we set the size of each cell on the stack to 16 bytes.
1947 This way one cell can hold any register type, from system registers
1948 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1949 has to be 16 bytes aligned anyway.
1950
1951 Note that the CPSR register does not exist on AArch64. Instead we
1952 can access system bits describing the process state with the
1953 MRS/MSR instructions, namely the condition flags. We save them as
1954 if they are part of a CPSR register because that's how GDB
1955 interprets these system bits. At the moment, only the condition
1956 flags are saved in CPSR (NZCV).
1957
1958 Stack layout, each cell is 16 bytes (descending):
1959
1960 High *-------- SIMD&FP registers from 31 down to 0. --------*
1961 | q31 |
1962 . .
1963 . . 32 cells
1964 . .
1965 | q0 |
1966 *---- General purpose registers from 30 down to 0. ----*
1967 | x30 |
1968 . .
1969 . . 31 cells
1970 . .
1971 | x0 |
1972 *------------- Special purpose registers. -------------*
1973 | SP |
1974 | PC |
1975 | CPSR (NZCV) | 5 cells
1976 | FPSR |
1977 | FPCR | <- SP + 16
1978 *------------- collecting_t object --------------------*
1979 | TPIDR_EL0 | struct tracepoint * |
1980 Low *------------------------------------------------------*
1981
1982 After this stack is set up, we issue a call to the collector, passing
1983 it the saved registers at (SP + 16). */
1984
1985 /* Push SIMD&FP registers on the stack:
1986
1987 SUB sp, sp, #(32 * 16)
1988
1989 STP q30, q31, [sp, #(30 * 16)]
1990 ...
1991 STP q0, q1, [sp]
1992
1993 */
1994 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1995 for (i = 30; i >= 0; i -= 2)
1996 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1997
30baf67b 1998 /* Push general purpose registers on the stack. Note that we do not need
bb903df0
PL
1999 to push x31 as it represents the xzr register and not the stack
2000 pointer in a STR instruction.
2001
2002 SUB sp, sp, #(31 * 16)
2003
2004 STR x30, [sp, #(30 * 16)]
2005 ...
2006 STR x0, [sp]
2007
2008 */
2009 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2010 for (i = 30; i >= 0; i -= 1)
2011 p += emit_str (p, aarch64_register (i, 1), sp,
2012 offset_memory_operand (i * 16));
2013
2014 /* Make space for 5 more cells.
2015
2016 SUB sp, sp, #(5 * 16)
2017
2018 */
2019 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2020
2021
2022 /* Save SP:
2023
2024 ADD x4, sp, #((32 + 31 + 5) * 16)
2025 STR x4, [sp, #(4 * 16)]
2026
2027 */
2028 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2029 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2030
2031 /* Save PC (tracepoint address):
2032
2033 MOV x3, #(tpaddr)
2034 ...
2035
2036 STR x3, [sp, #(3 * 16)]
2037
2038 */
2039
2040 p += emit_mov_addr (p, x3, tpaddr);
2041 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2042
2043 /* Save CPSR (NZCV), FPSR and FPCR:
2044
2045 MRS x2, nzcv
2046 MRS x1, fpsr
2047 MRS x0, fpcr
2048
2049 STR x2, [sp, #(2 * 16)]
2050 STR x1, [sp, #(1 * 16)]
2051 STR x0, [sp, #(0 * 16)]
2052
2053 */
2054 p += emit_mrs (p, x2, NZCV);
2055 p += emit_mrs (p, x1, FPSR);
2056 p += emit_mrs (p, x0, FPCR);
2057 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2058 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2059 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2060
2061 /* Push the collecting_t object. It consist of the address of the
2062 tracepoint and an ID for the current thread. We get the latter by
2063 reading the tpidr_el0 system register. It corresponds to the
2064 NT_ARM_TLS register accessible with ptrace.
2065
2066 MOV x0, #(tpoint)
2067 ...
2068
2069 MRS x1, tpidr_el0
2070
2071 STP x0, x1, [sp, #-16]!
2072
2073 */
2074
2075 p += emit_mov_addr (p, x0, tpoint);
2076 p += emit_mrs (p, x1, TPIDR_EL0);
2077 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2078
2079 /* Spin-lock:
2080
2081 The shared memory for the lock is at lockaddr. It will hold zero
2082 if no-one is holding the lock, otherwise it contains the address of
2083 the collecting_t object on the stack of the thread which acquired it.
2084
2085 At this stage, the stack pointer points to this thread's collecting_t
2086 object.
2087
2088 We use the following registers:
2089 - x0: Address of the lock.
2090 - x1: Pointer to collecting_t object.
2091 - x2: Scratch register.
2092
2093 MOV x0, #(lockaddr)
2094 ...
2095 MOV x1, sp
2096
2097 ; Trigger an event local to this core. So the following WFE
2098 ; instruction is ignored.
2099 SEVL
2100 again:
2101 ; Wait for an event. The event is triggered by either the SEVL
2102 ; or STLR instructions (store release).
2103 WFE
2104
2105 ; Atomically read at lockaddr. This marks the memory location as
2106 ; exclusive. This instruction also has memory constraints which
2107 ; make sure all previous data reads and writes are done before
2108 ; executing it.
2109 LDAXR x2, [x0]
2110
2111 ; Try again if another thread holds the lock.
2112 CBNZ x2, again
2113
2114 ; We can lock it! Write the address of the collecting_t object.
2115 ; This instruction will fail if the memory location is not marked
2116 ; as exclusive anymore. If it succeeds, it will remove the
2117 ; exclusive mark on the memory location. This way, if another
2118 ; thread executes this instruction before us, we will fail and try
2119 ; all over again.
2120 STXR w2, x1, [x0]
2121 CBNZ w2, again
2122
2123 */
2124
2125 p += emit_mov_addr (p, x0, lockaddr);
2126 p += emit_mov (p, x1, register_operand (sp));
2127
2128 p += emit_sevl (p);
2129 p += emit_wfe (p);
2130 p += emit_ldaxr (p, x2, x0);
2131 p += emit_cb (p, 1, w2, -2 * 4);
2132 p += emit_stxr (p, w2, x1, x0);
2133 p += emit_cb (p, 1, x2, -4 * 4);
2134
2135 /* Call collector (struct tracepoint *, unsigned char *):
2136
2137 MOV x0, #(tpoint)
2138 ...
2139
2140 ; Saved registers start after the collecting_t object.
2141 ADD x1, sp, #16
2142
2143 ; We use an intra-procedure-call scratch register.
2144 MOV ip0, #(collector)
2145 ...
2146
2147 ; And call back to C!
2148 BLR ip0
2149
2150 */
2151
2152 p += emit_mov_addr (p, x0, tpoint);
2153 p += emit_add (p, x1, sp, immediate_operand (16));
2154
2155 p += emit_mov_addr (p, ip0, collector);
2156 p += emit_blr (p, ip0);
2157
2158 /* Release the lock.
2159
2160 MOV x0, #(lockaddr)
2161 ...
2162
2163 ; This instruction is a normal store with memory ordering
2164 ; constraints. Thanks to this we do not have to put a data
2165 ; barrier instruction to make sure all data read and writes are done
30baf67b 2166 ; before this instruction is executed. Furthermore, this instruction
bb903df0
PL
2167 ; will trigger an event, letting other threads know they can grab
2168 ; the lock.
2169 STLR xzr, [x0]
2170
2171 */
2172 p += emit_mov_addr (p, x0, lockaddr);
2173 p += emit_stlr (p, xzr, x0);
2174
2175 /* Free collecting_t object:
2176
2177 ADD sp, sp, #16
2178
2179 */
2180 p += emit_add (p, sp, sp, immediate_operand (16));
2181
2182 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2183 registers from the stack.
2184
2185 LDR x2, [sp, #(2 * 16)]
2186 LDR x1, [sp, #(1 * 16)]
2187 LDR x0, [sp, #(0 * 16)]
2188
2189 MSR NZCV, x2
2190 MSR FPSR, x1
2191 MSR FPCR, x0
2192
2193 ADD sp, sp #(5 * 16)
2194
2195 */
2196 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2197 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2198 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2199 p += emit_msr (p, NZCV, x2);
2200 p += emit_msr (p, FPSR, x1);
2201 p += emit_msr (p, FPCR, x0);
2202
2203 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2204
2205 /* Pop general purpose registers:
2206
2207 LDR x0, [sp]
2208 ...
2209 LDR x30, [sp, #(30 * 16)]
2210
2211 ADD sp, sp, #(31 * 16)
2212
2213 */
2214 for (i = 0; i <= 30; i += 1)
2215 p += emit_ldr (p, aarch64_register (i, 1), sp,
2216 offset_memory_operand (i * 16));
2217 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2218
2219 /* Pop SIMD&FP registers:
2220
2221 LDP q0, q1, [sp]
2222 ...
2223 LDP q30, q31, [sp, #(30 * 16)]
2224
2225 ADD sp, sp, #(32 * 16)
2226
2227 */
2228 for (i = 0; i <= 30; i += 2)
2229 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2230 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2231
2232 /* Write the code into the inferior memory. */
2233 append_insns (&buildaddr, p - buf, buf);
2234
2235 /* Now emit the relocated instruction. */
2236 *adjusted_insn_addr = buildaddr;
70b439f0 2237 target_read_uint32 (tpaddr, &insn);
0badd99f
YQ
2238
2239 insn_data.base.insn_addr = tpaddr;
2240 insn_data.new_addr = buildaddr;
2241 insn_data.insn_ptr = buf;
2242
2243 aarch64_relocate_instruction (insn, &visitor,
2244 (struct aarch64_insn_data *) &insn_data);
2245
bb903df0 2246 /* We may not have been able to relocate the instruction. */
0badd99f 2247 if (insn_data.insn_ptr == buf)
bb903df0
PL
2248 {
2249 sprintf (err,
2250 "E.Could not relocate instruction from %s to %s.",
2251 core_addr_to_string_nz (tpaddr),
2252 core_addr_to_string_nz (buildaddr));
2253 return 1;
2254 }
dfaffe9d 2255 else
0badd99f 2256 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2257 *adjusted_insn_addr_end = buildaddr;
bb903df0
PL
2258
2259 /* Go back to the start of the buffer. */
2260 p = buf;
2261
2262 /* Emit a branch back from the jump pad. */
2263 offset = (tpaddr + orig_size - buildaddr);
2264 if (!can_encode_int32 (offset, 28))
2265 {
2266 sprintf (err,
2267 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2268 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
bb903df0
PL
2269 offset);
2270 return 1;
2271 }
2272
2273 p += emit_b (p, 0, offset);
2274 append_insns (&buildaddr, p - buf, buf);
2275
2276 /* Give the caller a branch instruction into the jump pad. */
2277 offset = (*jump_entry - tpaddr);
2278 if (!can_encode_int32 (offset, 28))
2279 {
2280 sprintf (err,
2281 "E.Jump pad too far from tracepoint "
2ac09a5b 2282 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
bb903df0
PL
2283 offset);
2284 return 1;
2285 }
2286
2287 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2288 *jjump_pad_insn_size = 4;
2289
2290 /* Return the end address of our pad. */
2291 *jump_entry = buildaddr;
2292
2293 return 0;
2294}
2295
afbe19f8
PL
2296/* Helper function writing LEN instructions from START into
2297 current_insn_ptr. */
2298
2299static void
2300emit_ops_insns (const uint32_t *start, int len)
2301{
2302 CORE_ADDR buildaddr = current_insn_ptr;
2303
2304 if (debug_threads)
2305 debug_printf ("Adding %d instrucions at %s\n",
2306 len, paddress (buildaddr));
2307
2308 append_insns (&buildaddr, len, start);
2309 current_insn_ptr = buildaddr;
2310}
2311
2312/* Pop a register from the stack. */
2313
2314static int
2315emit_pop (uint32_t *buf, struct aarch64_register rt)
2316{
2317 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2318}
2319
2320/* Push a register on the stack. */
2321
2322static int
2323emit_push (uint32_t *buf, struct aarch64_register rt)
2324{
2325 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2326}
2327
2328/* Implementation of emit_ops method "emit_prologue". */
2329
2330static void
2331aarch64_emit_prologue (void)
2332{
2333 uint32_t buf[16];
2334 uint32_t *p = buf;
2335
2336 /* This function emit a prologue for the following function prototype:
2337
2338 enum eval_result_type f (unsigned char *regs,
2339 ULONGEST *value);
2340
2341 The first argument is a buffer of raw registers. The second
2342 argument is the result of
2343 evaluating the expression, which will be set to whatever is on top of
2344 the stack at the end.
2345
2346 The stack set up by the prologue is as such:
2347
2348 High *------------------------------------------------------*
2349 | LR |
2350 | FP | <- FP
2351 | x1 (ULONGEST *value) |
2352 | x0 (unsigned char *regs) |
2353 Low *------------------------------------------------------*
2354
2355 As we are implementing a stack machine, each opcode can expand the
2356 stack so we never know how far we are from the data saved by this
2357 prologue. In order to be able refer to value and regs later, we save
2358 the current stack pointer in the frame pointer. This way, it is not
2359 clobbered when calling C functions.
2360
30baf67b 2361 Finally, throughout every operation, we are using register x0 as the
afbe19f8
PL
2362 top of the stack, and x1 as a scratch register. */
2363
2364 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2365 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2366 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2367
2368 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2369
2370
2371 emit_ops_insns (buf, p - buf);
2372}
2373
2374/* Implementation of emit_ops method "emit_epilogue". */
2375
2376static void
2377aarch64_emit_epilogue (void)
2378{
2379 uint32_t buf[16];
2380 uint32_t *p = buf;
2381
2382 /* Store the result of the expression (x0) in *value. */
2383 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2384 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2385 p += emit_str (p, x0, x1, offset_memory_operand (0));
2386
2387 /* Restore the previous state. */
2388 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2389 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2390
2391 /* Return expr_eval_no_error. */
2392 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2393 p += emit_ret (p, lr);
2394
2395 emit_ops_insns (buf, p - buf);
2396}
2397
2398/* Implementation of emit_ops method "emit_add". */
2399
2400static void
2401aarch64_emit_add (void)
2402{
2403 uint32_t buf[16];
2404 uint32_t *p = buf;
2405
2406 p += emit_pop (p, x1);
45e3745e 2407 p += emit_add (p, x0, x1, register_operand (x0));
afbe19f8
PL
2408
2409 emit_ops_insns (buf, p - buf);
2410}
2411
2412/* Implementation of emit_ops method "emit_sub". */
2413
2414static void
2415aarch64_emit_sub (void)
2416{
2417 uint32_t buf[16];
2418 uint32_t *p = buf;
2419
2420 p += emit_pop (p, x1);
45e3745e 2421 p += emit_sub (p, x0, x1, register_operand (x0));
afbe19f8
PL
2422
2423 emit_ops_insns (buf, p - buf);
2424}
2425
2426/* Implementation of emit_ops method "emit_mul". */
2427
2428static void
2429aarch64_emit_mul (void)
2430{
2431 uint32_t buf[16];
2432 uint32_t *p = buf;
2433
2434 p += emit_pop (p, x1);
2435 p += emit_mul (p, x0, x1, x0);
2436
2437 emit_ops_insns (buf, p - buf);
2438}
2439
2440/* Implementation of emit_ops method "emit_lsh". */
2441
2442static void
2443aarch64_emit_lsh (void)
2444{
2445 uint32_t buf[16];
2446 uint32_t *p = buf;
2447
2448 p += emit_pop (p, x1);
2449 p += emit_lslv (p, x0, x1, x0);
2450
2451 emit_ops_insns (buf, p - buf);
2452}
2453
2454/* Implementation of emit_ops method "emit_rsh_signed". */
2455
2456static void
2457aarch64_emit_rsh_signed (void)
2458{
2459 uint32_t buf[16];
2460 uint32_t *p = buf;
2461
2462 p += emit_pop (p, x1);
2463 p += emit_asrv (p, x0, x1, x0);
2464
2465 emit_ops_insns (buf, p - buf);
2466}
2467
2468/* Implementation of emit_ops method "emit_rsh_unsigned". */
2469
2470static void
2471aarch64_emit_rsh_unsigned (void)
2472{
2473 uint32_t buf[16];
2474 uint32_t *p = buf;
2475
2476 p += emit_pop (p, x1);
2477 p += emit_lsrv (p, x0, x1, x0);
2478
2479 emit_ops_insns (buf, p - buf);
2480}
2481
2482/* Implementation of emit_ops method "emit_ext". */
2483
2484static void
2485aarch64_emit_ext (int arg)
2486{
2487 uint32_t buf[16];
2488 uint32_t *p = buf;
2489
2490 p += emit_sbfx (p, x0, x0, 0, arg);
2491
2492 emit_ops_insns (buf, p - buf);
2493}
2494
2495/* Implementation of emit_ops method "emit_log_not". */
2496
2497static void
2498aarch64_emit_log_not (void)
2499{
2500 uint32_t buf[16];
2501 uint32_t *p = buf;
2502
2503 /* If the top of the stack is 0, replace it with 1. Else replace it with
2504 0. */
2505
2506 p += emit_cmp (p, x0, immediate_operand (0));
2507 p += emit_cset (p, x0, EQ);
2508
2509 emit_ops_insns (buf, p - buf);
2510}
2511
2512/* Implementation of emit_ops method "emit_bit_and". */
2513
2514static void
2515aarch64_emit_bit_and (void)
2516{
2517 uint32_t buf[16];
2518 uint32_t *p = buf;
2519
2520 p += emit_pop (p, x1);
2521 p += emit_and (p, x0, x0, x1);
2522
2523 emit_ops_insns (buf, p - buf);
2524}
2525
2526/* Implementation of emit_ops method "emit_bit_or". */
2527
2528static void
2529aarch64_emit_bit_or (void)
2530{
2531 uint32_t buf[16];
2532 uint32_t *p = buf;
2533
2534 p += emit_pop (p, x1);
2535 p += emit_orr (p, x0, x0, x1);
2536
2537 emit_ops_insns (buf, p - buf);
2538}
2539
2540/* Implementation of emit_ops method "emit_bit_xor". */
2541
2542static void
2543aarch64_emit_bit_xor (void)
2544{
2545 uint32_t buf[16];
2546 uint32_t *p = buf;
2547
2548 p += emit_pop (p, x1);
2549 p += emit_eor (p, x0, x0, x1);
2550
2551 emit_ops_insns (buf, p - buf);
2552}
2553
2554/* Implementation of emit_ops method "emit_bit_not". */
2555
2556static void
2557aarch64_emit_bit_not (void)
2558{
2559 uint32_t buf[16];
2560 uint32_t *p = buf;
2561
2562 p += emit_mvn (p, x0, x0);
2563
2564 emit_ops_insns (buf, p - buf);
2565}
2566
2567/* Implementation of emit_ops method "emit_equal". */
2568
2569static void
2570aarch64_emit_equal (void)
2571{
2572 uint32_t buf[16];
2573 uint32_t *p = buf;
2574
2575 p += emit_pop (p, x1);
2576 p += emit_cmp (p, x0, register_operand (x1));
2577 p += emit_cset (p, x0, EQ);
2578
2579 emit_ops_insns (buf, p - buf);
2580}
2581
2582/* Implementation of emit_ops method "emit_less_signed". */
2583
2584static void
2585aarch64_emit_less_signed (void)
2586{
2587 uint32_t buf[16];
2588 uint32_t *p = buf;
2589
2590 p += emit_pop (p, x1);
2591 p += emit_cmp (p, x1, register_operand (x0));
2592 p += emit_cset (p, x0, LT);
2593
2594 emit_ops_insns (buf, p - buf);
2595}
2596
2597/* Implementation of emit_ops method "emit_less_unsigned". */
2598
2599static void
2600aarch64_emit_less_unsigned (void)
2601{
2602 uint32_t buf[16];
2603 uint32_t *p = buf;
2604
2605 p += emit_pop (p, x1);
2606 p += emit_cmp (p, x1, register_operand (x0));
2607 p += emit_cset (p, x0, LO);
2608
2609 emit_ops_insns (buf, p - buf);
2610}
2611
2612/* Implementation of emit_ops method "emit_ref". */
2613
2614static void
2615aarch64_emit_ref (int size)
2616{
2617 uint32_t buf[16];
2618 uint32_t *p = buf;
2619
2620 switch (size)
2621 {
2622 case 1:
2623 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2624 break;
2625 case 2:
2626 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2627 break;
2628 case 4:
2629 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2630 break;
2631 case 8:
2632 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2633 break;
2634 default:
2635 /* Unknown size, bail on compilation. */
2636 emit_error = 1;
2637 break;
2638 }
2639
2640 emit_ops_insns (buf, p - buf);
2641}
2642
2643/* Implementation of emit_ops method "emit_if_goto". */
2644
2645static void
2646aarch64_emit_if_goto (int *offset_p, int *size_p)
2647{
2648 uint32_t buf[16];
2649 uint32_t *p = buf;
2650
2651 /* The Z flag is set or cleared here. */
2652 p += emit_cmp (p, x0, immediate_operand (0));
2653 /* This instruction must not change the Z flag. */
2654 p += emit_pop (p, x0);
2655 /* Branch over the next instruction if x0 == 0. */
2656 p += emit_bcond (p, EQ, 8);
2657
2658 /* The NOP instruction will be patched with an unconditional branch. */
2659 if (offset_p)
2660 *offset_p = (p - buf) * 4;
2661 if (size_p)
2662 *size_p = 4;
2663 p += emit_nop (p);
2664
2665 emit_ops_insns (buf, p - buf);
2666}
2667
2668/* Implementation of emit_ops method "emit_goto". */
2669
2670static void
2671aarch64_emit_goto (int *offset_p, int *size_p)
2672{
2673 uint32_t buf[16];
2674 uint32_t *p = buf;
2675
2676 /* The NOP instruction will be patched with an unconditional branch. */
2677 if (offset_p)
2678 *offset_p = 0;
2679 if (size_p)
2680 *size_p = 4;
2681 p += emit_nop (p);
2682
2683 emit_ops_insns (buf, p - buf);
2684}
2685
2686/* Implementation of emit_ops method "write_goto_address". */
2687
bb1183e2 2688static void
afbe19f8
PL
2689aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2690{
2691 uint32_t insn;
2692
2693 emit_b (&insn, 0, to - from);
2694 append_insns (&from, 1, &insn);
2695}
2696
2697/* Implementation of emit_ops method "emit_const". */
2698
2699static void
2700aarch64_emit_const (LONGEST num)
2701{
2702 uint32_t buf[16];
2703 uint32_t *p = buf;
2704
2705 p += emit_mov_addr (p, x0, num);
2706
2707 emit_ops_insns (buf, p - buf);
2708}
2709
2710/* Implementation of emit_ops method "emit_call". */
2711
2712static void
2713aarch64_emit_call (CORE_ADDR fn)
2714{
2715 uint32_t buf[16];
2716 uint32_t *p = buf;
2717
2718 p += emit_mov_addr (p, ip0, fn);
2719 p += emit_blr (p, ip0);
2720
2721 emit_ops_insns (buf, p - buf);
2722}
2723
2724/* Implementation of emit_ops method "emit_reg". */
2725
2726static void
2727aarch64_emit_reg (int reg)
2728{
2729 uint32_t buf[16];
2730 uint32_t *p = buf;
2731
2732 /* Set x0 to unsigned char *regs. */
2733 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2734 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2735 p += emit_mov (p, x1, immediate_operand (reg));
2736
2737 emit_ops_insns (buf, p - buf);
2738
2739 aarch64_emit_call (get_raw_reg_func_addr ());
2740}
2741
2742/* Implementation of emit_ops method "emit_pop". */
2743
2744static void
2745aarch64_emit_pop (void)
2746{
2747 uint32_t buf[16];
2748 uint32_t *p = buf;
2749
2750 p += emit_pop (p, x0);
2751
2752 emit_ops_insns (buf, p - buf);
2753}
2754
2755/* Implementation of emit_ops method "emit_stack_flush". */
2756
2757static void
2758aarch64_emit_stack_flush (void)
2759{
2760 uint32_t buf[16];
2761 uint32_t *p = buf;
2762
2763 p += emit_push (p, x0);
2764
2765 emit_ops_insns (buf, p - buf);
2766}
2767
2768/* Implementation of emit_ops method "emit_zero_ext". */
2769
2770static void
2771aarch64_emit_zero_ext (int arg)
2772{
2773 uint32_t buf[16];
2774 uint32_t *p = buf;
2775
2776 p += emit_ubfx (p, x0, x0, 0, arg);
2777
2778 emit_ops_insns (buf, p - buf);
2779}
2780
2781/* Implementation of emit_ops method "emit_swap". */
2782
2783static void
2784aarch64_emit_swap (void)
2785{
2786 uint32_t buf[16];
2787 uint32_t *p = buf;
2788
2789 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2790 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2791 p += emit_mov (p, x0, register_operand (x1));
2792
2793 emit_ops_insns (buf, p - buf);
2794}
2795
2796/* Implementation of emit_ops method "emit_stack_adjust". */
2797
2798static void
2799aarch64_emit_stack_adjust (int n)
2800{
2801 /* This is not needed with our design. */
2802 uint32_t buf[16];
2803 uint32_t *p = buf;
2804
2805 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2806
2807 emit_ops_insns (buf, p - buf);
2808}
2809
2810/* Implementation of emit_ops method "emit_int_call_1". */
2811
2812static void
2813aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2814{
2815 uint32_t buf[16];
2816 uint32_t *p = buf;
2817
2818 p += emit_mov (p, x0, immediate_operand (arg1));
2819
2820 emit_ops_insns (buf, p - buf);
2821
2822 aarch64_emit_call (fn);
2823}
2824
2825/* Implementation of emit_ops method "emit_void_call_2". */
2826
2827static void
2828aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2829{
2830 uint32_t buf[16];
2831 uint32_t *p = buf;
2832
2833 /* Push x0 on the stack. */
2834 aarch64_emit_stack_flush ();
2835
2836 /* Setup arguments for the function call:
2837
2838 x0: arg1
2839 x1: top of the stack
2840
2841 MOV x1, x0
2842 MOV x0, #arg1 */
2843
2844 p += emit_mov (p, x1, register_operand (x0));
2845 p += emit_mov (p, x0, immediate_operand (arg1));
2846
2847 emit_ops_insns (buf, p - buf);
2848
2849 aarch64_emit_call (fn);
2850
2851 /* Restore x0. */
2852 aarch64_emit_pop ();
2853}
2854
2855/* Implementation of emit_ops method "emit_eq_goto". */
2856
2857static void
2858aarch64_emit_eq_goto (int *offset_p, int *size_p)
2859{
2860 uint32_t buf[16];
2861 uint32_t *p = buf;
2862
2863 p += emit_pop (p, x1);
2864 p += emit_cmp (p, x1, register_operand (x0));
2865 /* Branch over the next instruction if x0 != x1. */
2866 p += emit_bcond (p, NE, 8);
2867 /* The NOP instruction will be patched with an unconditional branch. */
2868 if (offset_p)
2869 *offset_p = (p - buf) * 4;
2870 if (size_p)
2871 *size_p = 4;
2872 p += emit_nop (p);
2873
2874 emit_ops_insns (buf, p - buf);
2875}
2876
2877/* Implementation of emit_ops method "emit_ne_goto". */
2878
2879static void
2880aarch64_emit_ne_goto (int *offset_p, int *size_p)
2881{
2882 uint32_t buf[16];
2883 uint32_t *p = buf;
2884
2885 p += emit_pop (p, x1);
2886 p += emit_cmp (p, x1, register_operand (x0));
2887 /* Branch over the next instruction if x0 == x1. */
2888 p += emit_bcond (p, EQ, 8);
2889 /* The NOP instruction will be patched with an unconditional branch. */
2890 if (offset_p)
2891 *offset_p = (p - buf) * 4;
2892 if (size_p)
2893 *size_p = 4;
2894 p += emit_nop (p);
2895
2896 emit_ops_insns (buf, p - buf);
2897}
2898
2899/* Implementation of emit_ops method "emit_lt_goto". */
2900
2901static void
2902aarch64_emit_lt_goto (int *offset_p, int *size_p)
2903{
2904 uint32_t buf[16];
2905 uint32_t *p = buf;
2906
2907 p += emit_pop (p, x1);
2908 p += emit_cmp (p, x1, register_operand (x0));
2909 /* Branch over the next instruction if x0 >= x1. */
2910 p += emit_bcond (p, GE, 8);
2911 /* The NOP instruction will be patched with an unconditional branch. */
2912 if (offset_p)
2913 *offset_p = (p - buf) * 4;
2914 if (size_p)
2915 *size_p = 4;
2916 p += emit_nop (p);
2917
2918 emit_ops_insns (buf, p - buf);
2919}
2920
2921/* Implementation of emit_ops method "emit_le_goto". */
2922
2923static void
2924aarch64_emit_le_goto (int *offset_p, int *size_p)
2925{
2926 uint32_t buf[16];
2927 uint32_t *p = buf;
2928
2929 p += emit_pop (p, x1);
2930 p += emit_cmp (p, x1, register_operand (x0));
2931 /* Branch over the next instruction if x0 > x1. */
2932 p += emit_bcond (p, GT, 8);
2933 /* The NOP instruction will be patched with an unconditional branch. */
2934 if (offset_p)
2935 *offset_p = (p - buf) * 4;
2936 if (size_p)
2937 *size_p = 4;
2938 p += emit_nop (p);
2939
2940 emit_ops_insns (buf, p - buf);
2941}
2942
2943/* Implementation of emit_ops method "emit_gt_goto". */
2944
2945static void
2946aarch64_emit_gt_goto (int *offset_p, int *size_p)
2947{
2948 uint32_t buf[16];
2949 uint32_t *p = buf;
2950
2951 p += emit_pop (p, x1);
2952 p += emit_cmp (p, x1, register_operand (x0));
2953 /* Branch over the next instruction if x0 <= x1. */
2954 p += emit_bcond (p, LE, 8);
2955 /* The NOP instruction will be patched with an unconditional branch. */
2956 if (offset_p)
2957 *offset_p = (p - buf) * 4;
2958 if (size_p)
2959 *size_p = 4;
2960 p += emit_nop (p);
2961
2962 emit_ops_insns (buf, p - buf);
2963}
2964
2965/* Implementation of emit_ops method "emit_ge_got". */
2966
2967static void
2968aarch64_emit_ge_got (int *offset_p, int *size_p)
2969{
2970 uint32_t buf[16];
2971 uint32_t *p = buf;
2972
2973 p += emit_pop (p, x1);
2974 p += emit_cmp (p, x1, register_operand (x0));
2975 /* Branch over the next instruction if x0 <= x1. */
2976 p += emit_bcond (p, LT, 8);
2977 /* The NOP instruction will be patched with an unconditional branch. */
2978 if (offset_p)
2979 *offset_p = (p - buf) * 4;
2980 if (size_p)
2981 *size_p = 4;
2982 p += emit_nop (p);
2983
2984 emit_ops_insns (buf, p - buf);
2985}
2986
/* Table of emit_ops callbacks used to compile agent expressions into
   native AArch64 code.  Entries are positional and must stay in the
   order declared by struct emit_ops.  */

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};
3027
/* Implementation of linux_target_ops method "emit_ops".  Return the
   AArch64 bytecode-compilation callbacks.  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
3035
bb903df0
PL
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* A fast tracepoint is planted with a single 4-byte branch
     instruction.  */
  return 4;
}
3044
d1d0aea1
PL
/* Implementation of linux_target_ops method "supports_range_stepping".
   Always true on this target.  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3052
dd373349
AT
3053/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3054
3055static const gdb_byte *
3056aarch64_sw_breakpoint_from_kind (int kind, int *size)
3057{
17b1509a
YQ
3058 if (is_64bit_tdesc ())
3059 {
3060 *size = aarch64_breakpoint_len;
3061 return aarch64_breakpoint;
3062 }
3063 else
3064 return arm_sw_breakpoint_from_kind (kind, size);
3065}
3066
3067/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3068
3069static int
3070aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3071{
3072 if (is_64bit_tdesc ())
3073 return aarch64_breakpoint_len;
3074 else
3075 return arm_breakpoint_kind_from_pc (pcptr);
3076}
3077
3078/* Implementation of the linux_target_ops method
3079 "breakpoint_kind_from_current_state". */
3080
3081static int
3082aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3083{
3084 if (is_64bit_tdesc ())
3085 return aarch64_breakpoint_len;
3086 else
3087 return arm_breakpoint_kind_from_current_state (pcptr);
dd373349
AT
3088}
3089
7d00775e
AT
/* Support for hardware single step.  Always true on this target.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3097
176eb98c
MS
/* AArch64 vector of the remaining linux_target_ops hooks.  Entries are
   positional and must stay in the order declared by struct
   linux_target_ops.  */

struct linux_target_ops the_low_target =
{
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
3aee8918 3130
ef0478f6
TBA
/* The linux target ops object, pointing at the AArch64 process-target
   instance defined earlier in this file.  */

linux_process_target *the_linux_target = &the_aarch64_target;
3134
3aee8918
PA
void
initialize_low_arch (void)
{
  /* AArch64 GNU/Linux can also run 32-bit (AArch32) inferiors, so
     bring up the 32-bit support first.  */
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  /* SVE regsets are registered separately from the base regsets.  */
  initialize_regsets_info (&aarch64_sve_regsets_info);
}
This page took 0.736174 seconds and 4 git commands to generate.