/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache's target description contains the SVE
   registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
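
/* Note: the bytes above are the little-endian encoding of BRK #0
   (opcode 0xd4200000); aarch64_breakpoint_at below compares inferior
   memory against this same byte sequence.  */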

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
			       aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
	                   |---- range watched ----|
	           |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
  {
    uint32_t imm;
    struct aarch64_register reg;
  };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
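
/* For example, emit_blr (buf, ip0) yields BLR x16; the ENCODE macro
   just shifts the register number into bits [9:5], so the encoding is
   0xd63f0000 | (16 << 5) == 0xd63f0200.  */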

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
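
/* For example, a frame-style push/pop pair built from the two helpers
   above (a minimal sketch of the save/restore pattern the jump pad
   code below relies on):

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));
     ...
     p += emit_ldp (p, fp, lr, sp, postindex_memory_operand (16));  */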

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 4095 range (12 bits, unscaled).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3) for a 64-bit RT, or 0 .. 16380
   (12 bits << 2) for a 32-bit RT.  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
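
/* For example, spilling x0 to a slot 16 bytes above the current stack
   pointer, without adjusting SP:

     p += emit_str (p, x0, sp, offset_memory_operand (16));  */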

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
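
/* For example, a try-lock sequence built from the exclusive helpers
   above (a sketch of the pattern the jump pad's collecting lock uses;
   the backward branches on failure are emitted with emit_cb and
   friends from arch/aarch64-insn.h):

     again:
       p += emit_ldaxr (p, x1, x0);     load-acquire *x0 into x1
       (branch back to again while x1 indicates the lock is taken)
       p += emit_stxr (p, w2, x3, x0);  try to store x3; w2 = status
       (branch back to again while w2 is non-zero)  */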

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
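
/* For example, with an arbitrary user-space address such as
   0x0000ffffb7f01234,

     p += emit_mov_addr (p, x0, 0x0000ffffb7f01234);

   emits one MOV and two MOVKs; the top 16 bits are zero, so the final
   MOVK is skipped.  */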

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
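
/* For example, preserving the condition flags around flag-clobbering
   scratch code (a minimal sketch using the system registers defined
   earlier in this file):

     p += emit_mrs (p, x2, NZCV);
     ... code that clobbers the flags ...
     p += emit_msr (p, NZCV, x2);  */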

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
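
/* For example, extracting the byte in bits [19:12] of x1 into x0,
   zero extended:

     p += emit_ubfx (p, x0, x1, 12, 8);  */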

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes RN to RD if the condition is true, and
   RM + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register;
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
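
/* For example, materializing the boolean (x0 == x1) as 0/1 in x0,
   using the condition codes defined earlier in this file:

     p += emit_cmp (p, x0, register_operand (x1));
     p += emit_cset (p, x0, EQ);  */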

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
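
/* For example, the typical emit-then-flush pattern used by the code
   below (a sketch; TO is assumed to hold a writable address in the
   inferior):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&to, p - buf, buf);  */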

/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
bb903df0 1754
0badd99f
YQ
1755/* Implementation of aarch64_insn_visitor method "cb". */
1756
1757static void
1758aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1759 const unsigned rn, int is64,
1760 struct aarch64_insn_data *data)
1761{
1762 struct aarch64_insn_relocation_data *insn_reloc
1763 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1764 int64_t new_offset
0badd99f
YQ
1765 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1766
1767 if (can_encode_int32 (new_offset, 21))
1768 {
1769 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1770 aarch64_register (rn, is64), new_offset);
bb903df0 1771 }
0badd99f 1772 else if (can_encode_int32 (new_offset, 28))
bb903df0 1773 {
0badd99f
YQ
1774 /* The offset is out of range for a compare and branch
1775 instruction but not for a unconditional branch. We can use
1776 the following instructions instead:
1777
1778 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1779 B NOT_TAKEN ; Else jump over TAKEN and continue.
1780 TAKEN:
1781 B #(offset - 8)
1782 NOT_TAKEN:
1783
1784 */
1785 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1786 aarch64_register (rn, is64), 8);
1787 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1788 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1789 }
1790}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
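
/* For reference, the emit_mov_addr calls above materialize a full
   64-bit address as a MOV/MOVK sequence, along the lines of (a sketch;
   the exact encodings come from the emit helpers):

     MOVZ xd, #(addr & 0xffff)
     MOVK xd, #((addr >> 16) & 0xffff), lsl #16
     MOVK xd, #((addr >> 32) & 0xffff), lsl #32
     MOVK xd, #((addr >> 48) & 0xffff), lsl #48  */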

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 | <- SP + 16
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
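
  /* Illustration of the layout above: the collector receives
     REGS = SP + 16, so cell 0 is FPCR, cell 4 the saved SP, and general
     purpose register xN sits at cell 5 + N.  A reader of the saved area
     (hypothetical helper, not part of this file) would do roughly:

       uint64_t xn = *(uint64_t *) (regs + (5 + n) * 16);  */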

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

  */
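
  /* In C terms, the LDAXR/STXR loop above is roughly equivalent to the
     following (an illustration using GCC atomics, not code emitted
     here; `collecting` stands for this thread's collecting_t object):

       uintptr_t expected = 0;
       while (!__atomic_compare_exchange_n (&lock, &expected,
					    (uintptr_t) collecting, 0,
					    __ATOMIC_ACQUIRE,
					    __ATOMIC_RELAXED))
	 expected = 0;

     and the STLR release further below corresponds to
     __atomic_store_n (&lock, 0, __ATOMIC_RELEASE).  */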

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are done
     ; before this instruction is executed.  Furthermore, this instruction
     ; will trigger an event, letting other threads know they can grab
     ; the lock.
     STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
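
/* Note that emit_pop and emit_push move SP by a full 16 bytes per
   value even though only 8 bytes are stored: SP must stay 16-byte
   aligned when used as a base register on AArch64.  */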

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will
     be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1 (ULONGEST *value)                                 |
	  | x0 (unsigned char *regs)                             |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));


  emit_ops_insns (buf, p - buf);
}
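
/* For context: the compiled expression built with these emit_ops is
   later invoked through a function pointer matching the prototype
   above.  A hypothetical caller (an illustration, not code from this
   file) would look roughly like:

     enum eval_result_type (*fn) (unsigned char *, ULONGEST *);
     ULONGEST value = 0;
     fn = (enum eval_result_type (*) (unsigned char *, ULONGEST *)) addr;
     enum eval_result_type err = fn (regs, &value);  */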

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
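
/* For context, the bytecode compiler drives the pair above roughly as
   follows (hypothetical caller, not code from this file): emit_if_goto
   records the NOP patch site through OFFSET_P/SIZE_P, and once the
   branch target is known, write_goto_address patches it in:

     int offset, size;
     aarch64_emit_if_goto (&offset, &size);
     CORE_ADDR patch_site = start + offset;
     ...
     aarch64_write_goto_address (patch_site, target, size);  */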

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}