/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description in the regcache contains the
   SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
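
/* The bytes above are the little-endian encoding of 0xd4200000, i.e.
   BRK #0.  AArch64 instructions are always fetched little-endian,
   which is why the array lists the bytes in reverse of the 32-bit
   opcode as written.  */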

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
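
/* The z_type values above correspond to the remote protocol's Z0
   (software breakpoint), Z1 (hardware breakpoint), Z2 (write
   watchpoint), Z3 (read watchpoint) and Z4 (access watchpoint)
   packets, all of which this target supports.  */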

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
	     |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
ee4fbcfa
AH
559/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
560#define AARCH64_HWCAP_PACA (1 << 30)
561
797bcff5 562/* Implementation of linux target ops method "low_arch_setup". */
3b53ae99 563
797bcff5
TBA
564void
565aarch64_target::low_arch_setup ()
3b53ae99
YQ
566{
567 unsigned int machine;
568 int is_elf64;
569 int tid;
570
571 tid = lwpid_of (current_thread);
572
573 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
574
575 if (is_elf64)
fefa175e
AH
576 {
577 uint64_t vq = aarch64_sve_get_vq (tid);
974c89e0
AH
578 unsigned long hwcap = linux_get_hwcap (8);
579 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
ee4fbcfa
AH
580
581 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
fefa175e 582 }
3b53ae99 583 else
7cc17433 584 current_process ()->tdesc = aarch32_linux_read_description ();
176eb98c 585
af1b22f3 586 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
176eb98c
MS
587}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
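
/* Note: the register names above follow the Linux syscall conventions
   for each ABI: the AArch64 kernel ABI passes the syscall number in
   x8, while the 32-bit ARM EABI passes it in r7.  */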

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
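
/* Each constant above packs the MRS/MSR system register operands
   op0:op1:CRn:CRm:op2 into the 15-bit field that emit_mrs/emit_msr
   below place at bit 5 of the instruction word.  Only the low bit of
   op0 is encoded (hence the single-bit 0x1 field): for instance, the
   architecture defines NZCV as op0=3, op1=3, CRn=4, CRm=2, op2=0,
   i.e. S3_3_C4_C2_0.  */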

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
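
/* Throughout the emit_* helpers, ENCODE (value, size, offset) (from
   arch/aarch64-insn.h) masks VALUE to SIZE bits and shifts it to bit
   position OFFSET, so instruction words are built by OR-ing ENCODE
   terms into the opcode.  In emit_blr above, for instance,
   ENCODE (rn.num, 5, 5) places the register number in the Rn field,
   bits [9:5] of the encoding.  */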

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
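
/* Note the immediate above is encoded as operand.index >> 3: the
   load/store pair encodings hold a 7-bit signed immediate that the
   hardware scales by the access size (8 bytes for X registers).
   This is what limits the offset to the -512 .. 504 range documented
   for emit_stp/emit_ldp below; the Q-register variants further down
   scale by 16, hence their offset >> 4 and -1024 .. 1008 range.  */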

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
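
/* As an illustration, for an arbitrary ADDR of 0x0000ffffb7e00000 the
   sequence emitted above would be:

     MOV  xd, #0x0
     MOVK xd, #0xb7e0, lsl #16
     MOVK xd, #0xffff, lsl #32

   The final MOVK is skipped because bits 48..63 are zero; emission
   stops as soon as all remaining high bits are zero.  */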

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write a AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write a ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write a ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write a EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write a ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction places RN in RD if the condition is true, and RM
   incremented by one otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
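
/* A note on the offset arithmetic above: the original instruction
   branches to INSN_ADDR + OFFSET, and its relocated copy is emitted
   at NEW_ADDR, so the rewritten offset must be
   (INSN_ADDR + OFFSET) - NEW_ADDR, which is what NEW_OFFSET computes.
   If that value does not fit in the 28-bit range of B/BL, no
   instruction is emitted.  */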
1721
1722/* Implementation of aarch64_insn_visitor method "b_cond". */
1723
1724static void
1725aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1726 struct aarch64_insn_data *data)
1727{
1728 struct aarch64_insn_relocation_data *insn_reloc
1729 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1730 int64_t new_offset
0badd99f
YQ
1731 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1732
1733 if (can_encode_int32 (new_offset, 21))
1734 {
1735 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1736 new_offset);
bb903df0 1737 }
0badd99f 1738 else if (can_encode_int32 (new_offset, 28))
bb903df0 1739 {
0badd99f
YQ
1740 /* The offset is out of range for a conditional branch
1741 instruction but not for a unconditional branch. We can use
1742 the following instructions instead:
bb903df0 1743
0badd99f
YQ
1744 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1745 B NOT_TAKEN ; Else jump over TAKEN and continue.
1746 TAKEN:
1747 B #(offset - 8)
1748 NOT_TAKEN:
1749
1750 */
1751
1752 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1753 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1754 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1755 }
0badd99f 1756}
bb903df0 1757
0badd99f
YQ
1758/* Implementation of aarch64_insn_visitor method "cb". */
1759
1760static void
1761aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1762 const unsigned rn, int is64,
1763 struct aarch64_insn_data *data)
1764{
1765 struct aarch64_insn_relocation_data *insn_reloc
1766 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1767 int64_t new_offset
0badd99f
YQ
1768 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1769
1770 if (can_encode_int32 (new_offset, 21))
1771 {
1772 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1773 aarch64_register (rn, is64), new_offset);
bb903df0 1774 }
0badd99f 1775 else if (can_encode_int32 (new_offset, 28))
bb903df0 1776 {
0badd99f
YQ
1777 /* The offset is out of range for a compare and branch
1778 instruction but not for a unconditional branch. We can use
1779 the following instructions instead:
1780
1781 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1782 B NOT_TAKEN ; Else jump over TAKEN and continue.
1783 TAKEN:
1784 B #(offset - 8)
1785 NOT_TAKEN:
1786
1787 */
1788 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1789 aarch64_register (rn, is64), 8);
1790 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1791 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1792 }
1793}
bb903df0 1794
0badd99f 1795/* Implementation of aarch64_insn_visitor method "tb". */
bb903df0 1796
0badd99f
YQ
1797static void
1798aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1799 const unsigned rt, unsigned bit,
1800 struct aarch64_insn_data *data)
1801{
1802 struct aarch64_insn_relocation_data *insn_reloc
1803 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1804 int64_t new_offset
0badd99f
YQ
1805 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1806
1807 if (can_encode_int32 (new_offset, 16))
1808 {
1809 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1810 aarch64_register (rt, 1), new_offset);
bb903df0 1811 }
0badd99f 1812 else if (can_encode_int32 (new_offset, 28))
bb903df0 1813 {
0badd99f
YQ
1814 /* The offset is out of range for a test bit and branch
 1815	 instruction but not for an unconditional branch.  We can use
1816 the following instructions instead:
1817
 1818	 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
1819 B NOT_TAKEN ; Else jump over TAKEN and continue.
1820 TAKEN:
1821 B #(offset - 8)
1822 NOT_TAKEN:
1823
1824 */
1825 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1826 aarch64_register (rt, 1), 8);
1827 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1828 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1829 new_offset - 8);
1830 }
1831}
bb903df0 1832
0badd99f 1833/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1834
1835static void
1836aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1837 const int is_adrp,
1838 struct aarch64_insn_data *data)
1839{
1840 struct aarch64_insn_relocation_data *insn_reloc
1841 = (struct aarch64_insn_relocation_data *) data;
1842 /* We know exactly the address the ADR{P,} instruction will compute.
1843 We can just write it to the destination register. */
1844 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1845
1846 if (is_adrp)
1847 {
1848 /* Clear the lower 12 bits of the offset to get the 4K page. */
1849 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1850 aarch64_register (rd, 1),
1851 address & ~0xfff);
1852 }
1853 else
1854 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1855 aarch64_register (rd, 1), address);
1856}
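/* Illustrative example (the address is assumed): for a computed
   address of 0x412345, "address & ~0xfff" yields 0x412000, the base of
   the enclosing 4K page, which is exactly what the original ADRP would
   have produced.  */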
bb903df0 1857
0badd99f 1858/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1859
1860static void
1861aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1862 const unsigned rt, const int is64,
1863 struct aarch64_insn_data *data)
1864{
1865 struct aarch64_insn_relocation_data *insn_reloc
1866 = (struct aarch64_insn_relocation_data *) data;
1867 CORE_ADDR address = data->insn_addr + offset;
1868
1869 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1870 aarch64_register (rt, 1), address);
1871
1872 /* We know exactly what address to load from, and what register we
1873 can use:
1874
1875 MOV xd, #(oldloc + offset)
1876 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1877 ...
1878
1879 LDR xd, [xd] ; or LDRSW xd, [xd]
1880
1881 */
1882
1883 if (is_sw)
1884 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1885 aarch64_register (rt, 1),
1886 aarch64_register (rt, 1),
1887 offset_memory_operand (0));
bb903df0 1888 else
1889 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1890 aarch64_register (rt, is64),
1891 aarch64_register (rt, 1),
1892 offset_memory_operand (0));
1893}
1894
1895/* Implementation of aarch64_insn_visitor method "others". */
1896
1897static void
1898aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1899 struct aarch64_insn_data *data)
1900{
1901 struct aarch64_insn_relocation_data *insn_reloc
1902 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1903
1904 /* The instruction is not PC relative. Just re-emit it at the new
1905 location. */
e1c587c3 1906 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1907}
1908
1909static const struct aarch64_insn_visitor visitor =
1910{
1911 aarch64_ftrace_insn_reloc_b,
1912 aarch64_ftrace_insn_reloc_b_cond,
1913 aarch64_ftrace_insn_reloc_cb,
1914 aarch64_ftrace_insn_reloc_tb,
1915 aarch64_ftrace_insn_reloc_adr,
1916 aarch64_ftrace_insn_reloc_ldr_literal,
1917 aarch64_ftrace_insn_reloc_others,
1918};
1919
1920/* Implementation of linux_target_ops method
1921 "install_fast_tracepoint_jump_pad". */
1922
1923static int
1924aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1925 CORE_ADDR tpaddr,
1926 CORE_ADDR collector,
1927 CORE_ADDR lockaddr,
1928 ULONGEST orig_size,
1929 CORE_ADDR *jump_entry,
1930 CORE_ADDR *trampoline,
1931 ULONGEST *trampoline_size,
1932 unsigned char *jjump_pad_insn,
1933 ULONGEST *jjump_pad_insn_size,
1934 CORE_ADDR *adjusted_insn_addr,
1935 CORE_ADDR *adjusted_insn_addr_end,
1936 char *err)
1937{
1938 uint32_t buf[256];
1939 uint32_t *p = buf;
2ac09a5b 1940 int64_t offset;
bb903df0 1941 int i;
70b439f0 1942 uint32_t insn;
bb903df0 1943 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1944 struct aarch64_insn_relocation_data insn_data;
1945
1946 /* We need to save the current state on the stack both to restore it
1947 later and to collect register values when the tracepoint is hit.
1948
1949 The saved registers are pushed in a layout that needs to be in sync
1950 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1951 the supply_fast_tracepoint_registers function will fill in the
1952 register cache from a pointer to saved registers on the stack we build
1953 here.
1954
1955 For simplicity, we set the size of each cell on the stack to 16 bytes.
1956 This way one cell can hold any register type, from system registers
 1957	 This way one cell can hold any register type, from system registers
 1958	 to the 128-bit SIMD&FP registers.  Furthermore, the stack pointer
1959
1960 Note that the CPSR register does not exist on AArch64. Instead we
1961 can access system bits describing the process state with the
1962 MRS/MSR instructions, namely the condition flags. We save them as
1963 if they are part of a CPSR register because that's how GDB
1964 interprets these system bits. At the moment, only the condition
1965 flags are saved in CPSR (NZCV).
1966
1967 Stack layout, each cell is 16 bytes (descending):
1968
1969 High *-------- SIMD&FP registers from 31 down to 0. --------*
1970 | q31 |
1971 . .
1972 . . 32 cells
1973 . .
1974 | q0 |
1975 *---- General purpose registers from 30 down to 0. ----*
1976 | x30 |
1977 . .
1978 . . 31 cells
1979 . .
1980 | x0 |
1981 *------------- Special purpose registers. -------------*
1982 | SP |
1983 | PC |
1984 | CPSR (NZCV) | 5 cells
1985 | FPSR |
1986 | FPCR | <- SP + 16
1987 *------------- collecting_t object --------------------*
1988 | TPIDR_EL0 | struct tracepoint * |
1989 Low *------------------------------------------------------*
1990
1991 After this stack is set up, we issue a call to the collector, passing
1992 it the saved registers at (SP + 16). */
1993
1994 /* Push SIMD&FP registers on the stack:
1995
1996 SUB sp, sp, #(32 * 16)
1997
1998 STP q30, q31, [sp, #(30 * 16)]
1999 ...
2000 STP q0, q1, [sp]
2001
2002 */
2003 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2004 for (i = 30; i >= 0; i -= 2)
2005 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2006
30baf67b 2007 /* Push general purpose registers on the stack. Note that we do not need
2008 to push x31 as it represents the xzr register and not the stack
2009 pointer in a STR instruction.
2010
2011 SUB sp, sp, #(31 * 16)
2012
2013 STR x30, [sp, #(30 * 16)]
2014 ...
2015 STR x0, [sp]
2016
2017 */
2018 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2019 for (i = 30; i >= 0; i -= 1)
2020 p += emit_str (p, aarch64_register (i, 1), sp,
2021 offset_memory_operand (i * 16));
2022
2023 /* Make space for 5 more cells.
2024
2025 SUB sp, sp, #(5 * 16)
2026
2027 */
2028 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2029
2030
2031 /* Save SP:
2032
2033 ADD x4, sp, #((32 + 31 + 5) * 16)
2034 STR x4, [sp, #(4 * 16)]
2035
2036 */
2037 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2038 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2039
2040 /* Save PC (tracepoint address):
2041
2042 MOV x3, #(tpaddr)
2043 ...
2044
2045 STR x3, [sp, #(3 * 16)]
2046
2047 */
2048
2049 p += emit_mov_addr (p, x3, tpaddr);
2050 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2051
2052 /* Save CPSR (NZCV), FPSR and FPCR:
2053
2054 MRS x2, nzcv
2055 MRS x1, fpsr
2056 MRS x0, fpcr
2057
2058 STR x2, [sp, #(2 * 16)]
2059 STR x1, [sp, #(1 * 16)]
2060 STR x0, [sp, #(0 * 16)]
2061
2062 */
2063 p += emit_mrs (p, x2, NZCV);
2064 p += emit_mrs (p, x1, FPSR);
2065 p += emit_mrs (p, x0, FPCR);
2066 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2067 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2068 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2069
 2070	 /* Push the collecting_t object.  It consists of the address of the
2071 tracepoint and an ID for the current thread. We get the latter by
2072 reading the tpidr_el0 system register. It corresponds to the
2073 NT_ARM_TLS register accessible with ptrace.
2074
2075 MOV x0, #(tpoint)
2076 ...
2077
2078 MRS x1, tpidr_el0
2079
2080 STP x0, x1, [sp, #-16]!
2081
2082 */
2083
2084 p += emit_mov_addr (p, x0, tpoint);
2085 p += emit_mrs (p, x1, TPIDR_EL0);
2086 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2087
2088 /* Spin-lock:
2089
2090 The shared memory for the lock is at lockaddr. It will hold zero
2091 if no-one is holding the lock, otherwise it contains the address of
2092 the collecting_t object on the stack of the thread which acquired it.
2093
2094 At this stage, the stack pointer points to this thread's collecting_t
2095 object.
2096
2097 We use the following registers:
2098 - x0: Address of the lock.
2099 - x1: Pointer to collecting_t object.
2100 - x2: Scratch register.
2101
2102 MOV x0, #(lockaddr)
2103 ...
2104 MOV x1, sp
2105
 2106	 ; Trigger an event local to this core, so that the first WFE
 2107	 ; below returns immediately instead of waiting.
2108 SEVL
2109 again:
2110 ; Wait for an event. The event is triggered by either the SEVL
2111 ; or STLR instructions (store release).
2112 WFE
2113
2114 ; Atomically read at lockaddr. This marks the memory location as
2115 ; exclusive. This instruction also has memory constraints which
2116 ; make sure all previous data reads and writes are done before
2117 ; executing it.
2118 LDAXR x2, [x0]
2119
2120 ; Try again if another thread holds the lock.
2121 CBNZ x2, again
2122
2123 ; We can lock it! Write the address of the collecting_t object.
2124 ; This instruction will fail if the memory location is not marked
2125 ; as exclusive anymore. If it succeeds, it will remove the
2126 ; exclusive mark on the memory location. This way, if another
2127 ; thread executes this instruction before us, we will fail and try
2128 ; all over again.
2129 STXR w2, x1, [x0]
2130 CBNZ w2, again
2131
2132 */
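/* As a rough C11 analogue of the loop described above (an illustrative
   sketch only; the emitted assembly below is the authoritative version,
   and "lock" and "me" are assumed names):

     collecting_t *expected = NULL;
     while (!atomic_compare_exchange_strong (lock, &expected, me))
       expected = NULL;   // Lock held by someone else; retry.

   with the LDAXR/STXR pair playing the role of the compare-exchange,
   and WFE letting the core sleep between attempts.  */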
2133
2134 p += emit_mov_addr (p, x0, lockaddr);
2135 p += emit_mov (p, x1, register_operand (sp));
2136
2137 p += emit_sevl (p);
2138 p += emit_wfe (p);
2139 p += emit_ldaxr (p, x2, x0);
 2140	  p += emit_cb (p, 1, x2, -2 * 4);
2141 p += emit_stxr (p, w2, x1, x0);
 2142	  p += emit_cb (p, 1, w2, -4 * 4);
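  /* Note on the branch offsets just emitted: the two CBNZ instructions
     sit two and four 4-byte instructions after the WFE (the "again"
     label), so their backward byte offsets are -2 * 4 and -4 * 4.  */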
2143
2144 /* Call collector (struct tracepoint *, unsigned char *):
2145
2146 MOV x0, #(tpoint)
2147 ...
2148
2149 ; Saved registers start after the collecting_t object.
2150 ADD x1, sp, #16
2151
2152 ; We use an intra-procedure-call scratch register.
2153 MOV ip0, #(collector)
2154 ...
2155
2156 ; And call back to C!
2157 BLR ip0
2158
2159 */
2160
2161 p += emit_mov_addr (p, x0, tpoint);
2162 p += emit_add (p, x1, sp, immediate_operand (16));
2163
2164 p += emit_mov_addr (p, ip0, collector);
2165 p += emit_blr (p, ip0);
2166
2167 /* Release the lock.
2168
2169 MOV x0, #(lockaddr)
2170 ...
2171
2172 ; This instruction is a normal store with memory ordering
2173 ; constraints. Thanks to this we do not have to put a data
 2174	 ; barrier instruction to make sure all data reads and writes are done
30baf67b 2175 ; before this instruction is executed. Furthermore, this instruction
2176 ; will trigger an event, letting other threads know they can grab
2177 ; the lock.
2178 STLR xzr, [x0]
2179
2180 */
2181 p += emit_mov_addr (p, x0, lockaddr);
2182 p += emit_stlr (p, xzr, x0);
2183
2184 /* Free collecting_t object:
2185
2186 ADD sp, sp, #16
2187
2188 */
2189 p += emit_add (p, sp, sp, immediate_operand (16));
2190
2191 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2192 registers from the stack.
2193
2194 LDR x2, [sp, #(2 * 16)]
2195 LDR x1, [sp, #(1 * 16)]
2196 LDR x0, [sp, #(0 * 16)]
2197
2198 MSR NZCV, x2
2199 MSR FPSR, x1
2200 MSR FPCR, x0
2201
 2202	 ADD sp, sp, #(5 * 16)
2203
2204 */
2205 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2206 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2207 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2208 p += emit_msr (p, NZCV, x2);
2209 p += emit_msr (p, FPSR, x1);
2210 p += emit_msr (p, FPCR, x0);
2211
2212 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2213
2214 /* Pop general purpose registers:
2215
2216 LDR x0, [sp]
2217 ...
2218 LDR x30, [sp, #(30 * 16)]
2219
2220 ADD sp, sp, #(31 * 16)
2221
2222 */
2223 for (i = 0; i <= 30; i += 1)
2224 p += emit_ldr (p, aarch64_register (i, 1), sp,
2225 offset_memory_operand (i * 16));
2226 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2227
2228 /* Pop SIMD&FP registers:
2229
2230 LDP q0, q1, [sp]
2231 ...
2232 LDP q30, q31, [sp, #(30 * 16)]
2233
2234 ADD sp, sp, #(32 * 16)
2235
2236 */
2237 for (i = 0; i <= 30; i += 2)
2238 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2239 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2240
2241 /* Write the code into the inferior memory. */
2242 append_insns (&buildaddr, p - buf, buf);
2243
2244 /* Now emit the relocated instruction. */
2245 *adjusted_insn_addr = buildaddr;
70b439f0 2246 target_read_uint32 (tpaddr, &insn);
2247
2248 insn_data.base.insn_addr = tpaddr;
2249 insn_data.new_addr = buildaddr;
2250 insn_data.insn_ptr = buf;
2251
2252 aarch64_relocate_instruction (insn, &visitor,
2253 (struct aarch64_insn_data *) &insn_data);
2254
bb903df0 2255 /* We may not have been able to relocate the instruction. */
0badd99f 2256 if (insn_data.insn_ptr == buf)
2257 {
2258 sprintf (err,
2259 "E.Could not relocate instruction from %s to %s.",
2260 core_addr_to_string_nz (tpaddr),
2261 core_addr_to_string_nz (buildaddr));
2262 return 1;
2263 }
dfaffe9d 2264 else
0badd99f 2265 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2266 *adjusted_insn_addr_end = buildaddr;
2267
2268 /* Go back to the start of the buffer. */
2269 p = buf;
2270
2271 /* Emit a branch back from the jump pad. */
2272 offset = (tpaddr + orig_size - buildaddr);
2273 if (!can_encode_int32 (offset, 28))
2274 {
2275 sprintf (err,
2276 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2277 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2278 offset);
2279 return 1;
2280 }
2281
2282 p += emit_b (p, 0, offset);
2283 append_insns (&buildaddr, p - buf, buf);
2284
2285 /* Give the caller a branch instruction into the jump pad. */
2286 offset = (*jump_entry - tpaddr);
2287 if (!can_encode_int32 (offset, 28))
2288 {
2289 sprintf (err,
2290 "E.Jump pad too far from tracepoint "
2ac09a5b 2291 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2292 offset);
2293 return 1;
2294 }
2295
2296 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2297 *jjump_pad_insn_size = 4;
2298
2299 /* Return the end address of our pad. */
2300 *jump_entry = buildaddr;
2301
2302 return 0;
2303}
2304
2305/* Helper function writing LEN instructions from START into
2306 current_insn_ptr. */
2307
2308static void
2309emit_ops_insns (const uint32_t *start, int len)
2310{
2311 CORE_ADDR buildaddr = current_insn_ptr;
2312
2313 if (debug_threads)
 2314	    debug_printf ("Adding %d instructions at %s\n",
2315 len, paddress (buildaddr));
2316
2317 append_insns (&buildaddr, len, start);
2318 current_insn_ptr = buildaddr;
2319}
2320
2321/* Pop a register from the stack. */
2322
2323static int
2324emit_pop (uint32_t *buf, struct aarch64_register rt)
2325{
2326 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2327}
2328
2329/* Push a register on the stack. */
2330
2331static int
2332emit_push (uint32_t *buf, struct aarch64_register rt)
2333{
2334 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2335}
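/* Both helpers above use 16-byte stack slots even though only one
   8-byte register is transferred: SP must stay 16-byte aligned on
   AArch64, matching the 16-byte cells used by the jump pad code
   earlier in this file.  */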
2336
2337/* Implementation of emit_ops method "emit_prologue". */
2338
2339static void
2340aarch64_emit_prologue (void)
2341{
2342 uint32_t buf[16];
2343 uint32_t *p = buf;
2344
 2345	  /* This function emits a prologue for the following function prototype:
2346
2347 enum eval_result_type f (unsigned char *regs,
2348 ULONGEST *value);
2349
2350 The first argument is a buffer of raw registers. The second
2351 argument is the result of
2352 evaluating the expression, which will be set to whatever is on top of
2353 the stack at the end.
2354
2355 The stack set up by the prologue is as such:
2356
2357 High *------------------------------------------------------*
2358 | LR |
2359 | FP | <- FP
2360 | x1 (ULONGEST *value) |
2361 | x0 (unsigned char *regs) |
2362 Low *------------------------------------------------------*
2363
2364 As we are implementing a stack machine, each opcode can expand the
2365 stack so we never know how far we are from the data saved by this
 2366	 prologue.  In order to be able to refer to value and regs later, we save
2367 the current stack pointer in the frame pointer. This way, it is not
2368 clobbered when calling C functions.
2369
30baf67b 2370 Finally, throughout every operation, we are using register x0 as the
2371 top of the stack, and x1 as a scratch register. */
2372
2373 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2374 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2375 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2376
2377 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
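  /* Mapping the code above to the diagram: the STP allocates all four
     8-byte slots at once (pre-index of -2 * 16 bytes) and fills the two
     lowest with x0 and x1; the two STRs place LR and FP at offsets
     3 * 8 and 2 * 8; and the final ADD leaves FP pointing at the saved
     FP slot.  */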
2378
2379
2380 emit_ops_insns (buf, p - buf);
2381}
2382
2383/* Implementation of emit_ops method "emit_epilogue". */
2384
2385static void
2386aarch64_emit_epilogue (void)
2387{
2388 uint32_t buf[16];
2389 uint32_t *p = buf;
2390
2391 /* Store the result of the expression (x0) in *value. */
2392 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2393 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2394 p += emit_str (p, x0, x1, offset_memory_operand (0));
2395
2396 /* Restore the previous state. */
2397 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2398 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2399
2400 /* Return expr_eval_no_error. */
2401 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2402 p += emit_ret (p, lr);
2403
2404 emit_ops_insns (buf, p - buf);
2405}
2406
2407/* Implementation of emit_ops method "emit_add". */
2408
2409static void
2410aarch64_emit_add (void)
2411{
2412 uint32_t buf[16];
2413 uint32_t *p = buf;
2414
2415 p += emit_pop (p, x1);
45e3745e 2416 p += emit_add (p, x0, x1, register_operand (x0));
2417
2418 emit_ops_insns (buf, p - buf);
2419}
2420
2421/* Implementation of emit_ops method "emit_sub". */
2422
2423static void
2424aarch64_emit_sub (void)
2425{
2426 uint32_t buf[16];
2427 uint32_t *p = buf;
2428
2429 p += emit_pop (p, x1);
45e3745e 2430 p += emit_sub (p, x0, x1, register_operand (x0));
2431
2432 emit_ops_insns (buf, p - buf);
2433}
2434
2435/* Implementation of emit_ops method "emit_mul". */
2436
2437static void
2438aarch64_emit_mul (void)
2439{
2440 uint32_t buf[16];
2441 uint32_t *p = buf;
2442
2443 p += emit_pop (p, x1);
2444 p += emit_mul (p, x0, x1, x0);
2445
2446 emit_ops_insns (buf, p - buf);
2447}
2448
2449/* Implementation of emit_ops method "emit_lsh". */
2450
2451static void
2452aarch64_emit_lsh (void)
2453{
2454 uint32_t buf[16];
2455 uint32_t *p = buf;
2456
2457 p += emit_pop (p, x1);
2458 p += emit_lslv (p, x0, x1, x0);
2459
2460 emit_ops_insns (buf, p - buf);
2461}
2462
2463/* Implementation of emit_ops method "emit_rsh_signed". */
2464
2465static void
2466aarch64_emit_rsh_signed (void)
2467{
2468 uint32_t buf[16];
2469 uint32_t *p = buf;
2470
2471 p += emit_pop (p, x1);
2472 p += emit_asrv (p, x0, x1, x0);
2473
2474 emit_ops_insns (buf, p - buf);
2475}
2476
2477/* Implementation of emit_ops method "emit_rsh_unsigned". */
2478
2479static void
2480aarch64_emit_rsh_unsigned (void)
2481{
2482 uint32_t buf[16];
2483 uint32_t *p = buf;
2484
2485 p += emit_pop (p, x1);
2486 p += emit_lsrv (p, x0, x1, x0);
2487
2488 emit_ops_insns (buf, p - buf);
2489}
2490
2491/* Implementation of emit_ops method "emit_ext". */
2492
2493static void
2494aarch64_emit_ext (int arg)
2495{
2496 uint32_t buf[16];
2497 uint32_t *p = buf;
2498
2499 p += emit_sbfx (p, x0, x0, 0, arg);
2500
2501 emit_ops_insns (buf, p - buf);
2502}
2503
2504/* Implementation of emit_ops method "emit_log_not". */
2505
2506static void
2507aarch64_emit_log_not (void)
2508{
2509 uint32_t buf[16];
2510 uint32_t *p = buf;
2511
2512 /* If the top of the stack is 0, replace it with 1. Else replace it with
2513 0. */
2514
2515 p += emit_cmp (p, x0, immediate_operand (0));
2516 p += emit_cset (p, x0, EQ);
2517
2518 emit_ops_insns (buf, p - buf);
2519}
2520
2521/* Implementation of emit_ops method "emit_bit_and". */
2522
2523static void
2524aarch64_emit_bit_and (void)
2525{
2526 uint32_t buf[16];
2527 uint32_t *p = buf;
2528
2529 p += emit_pop (p, x1);
2530 p += emit_and (p, x0, x0, x1);
2531
2532 emit_ops_insns (buf, p - buf);
2533}
2534
2535/* Implementation of emit_ops method "emit_bit_or". */
2536
2537static void
2538aarch64_emit_bit_or (void)
2539{
2540 uint32_t buf[16];
2541 uint32_t *p = buf;
2542
2543 p += emit_pop (p, x1);
2544 p += emit_orr (p, x0, x0, x1);
2545
2546 emit_ops_insns (buf, p - buf);
2547}
2548
2549/* Implementation of emit_ops method "emit_bit_xor". */
2550
2551static void
2552aarch64_emit_bit_xor (void)
2553{
2554 uint32_t buf[16];
2555 uint32_t *p = buf;
2556
2557 p += emit_pop (p, x1);
2558 p += emit_eor (p, x0, x0, x1);
2559
2560 emit_ops_insns (buf, p - buf);
2561}
2562
2563/* Implementation of emit_ops method "emit_bit_not". */
2564
2565static void
2566aarch64_emit_bit_not (void)
2567{
2568 uint32_t buf[16];
2569 uint32_t *p = buf;
2570
2571 p += emit_mvn (p, x0, x0);
2572
2573 emit_ops_insns (buf, p - buf);
2574}
2575
2576/* Implementation of emit_ops method "emit_equal". */
2577
2578static void
2579aarch64_emit_equal (void)
2580{
2581 uint32_t buf[16];
2582 uint32_t *p = buf;
2583
2584 p += emit_pop (p, x1);
2585 p += emit_cmp (p, x0, register_operand (x1));
2586 p += emit_cset (p, x0, EQ);
2587
2588 emit_ops_insns (buf, p - buf);
2589}
2590
2591/* Implementation of emit_ops method "emit_less_signed". */
2592
2593static void
2594aarch64_emit_less_signed (void)
2595{
2596 uint32_t buf[16];
2597 uint32_t *p = buf;
2598
2599 p += emit_pop (p, x1);
2600 p += emit_cmp (p, x1, register_operand (x0));
2601 p += emit_cset (p, x0, LT);
2602
2603 emit_ops_insns (buf, p - buf);
2604}
2605
2606/* Implementation of emit_ops method "emit_less_unsigned". */
2607
2608static void
2609aarch64_emit_less_unsigned (void)
2610{
2611 uint32_t buf[16];
2612 uint32_t *p = buf;
2613
2614 p += emit_pop (p, x1);
2615 p += emit_cmp (p, x1, register_operand (x0));
2616 p += emit_cset (p, x0, LO);
2617
2618 emit_ops_insns (buf, p - buf);
2619}
2620
2621/* Implementation of emit_ops method "emit_ref". */
2622
2623static void
2624aarch64_emit_ref (int size)
2625{
2626 uint32_t buf[16];
2627 uint32_t *p = buf;
2628
2629 switch (size)
2630 {
2631 case 1:
2632 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2633 break;
2634 case 2:
2635 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2636 break;
2637 case 4:
2638 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2639 break;
2640 case 8:
2641 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2642 break;
2643 default:
2644 /* Unknown size, bail on compilation. */
2645 emit_error = 1;
2646 break;
2647 }
2648
2649 emit_ops_insns (buf, p - buf);
2650}
2651
2652/* Implementation of emit_ops method "emit_if_goto". */
2653
2654static void
2655aarch64_emit_if_goto (int *offset_p, int *size_p)
2656{
2657 uint32_t buf[16];
2658 uint32_t *p = buf;
2659
2660 /* The Z flag is set or cleared here. */
2661 p += emit_cmp (p, x0, immediate_operand (0));
2662 /* This instruction must not change the Z flag. */
2663 p += emit_pop (p, x0);
2664 /* Branch over the next instruction if x0 == 0. */
2665 p += emit_bcond (p, EQ, 8);
2666
2667 /* The NOP instruction will be patched with an unconditional branch. */
2668 if (offset_p)
2669 *offset_p = (p - buf) * 4;
2670 if (size_p)
2671 *size_p = 4;
2672 p += emit_nop (p);
2673
2674 emit_ops_insns (buf, p - buf);
2675}
2676
2677/* Implementation of emit_ops method "emit_goto". */
2678
2679static void
2680aarch64_emit_goto (int *offset_p, int *size_p)
2681{
2682 uint32_t buf[16];
2683 uint32_t *p = buf;
2684
2685 /* The NOP instruction will be patched with an unconditional branch. */
2686 if (offset_p)
2687 *offset_p = 0;
2688 if (size_p)
2689 *size_p = 4;
2690 p += emit_nop (p);
2691
2692 emit_ops_insns (buf, p - buf);
2693}
2694
2695/* Implementation of emit_ops method "write_goto_address". */
2696
bb1183e2 2697static void
2698aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2699{
2700 uint32_t insn;
2701
2702 emit_b (&insn, 0, to - from);
2703 append_insns (&from, 1, &insn);
2704}
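/* Taken together: emit_if_goto and emit_goto record the position and
   size of a NOP through OFFSET_P and SIZE_P; once the bytecode
   compiler knows the destination, it calls write_goto_address, which
   overwrites that NOP with a real B instruction.  */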
2705
2706/* Implementation of emit_ops method "emit_const". */
2707
2708static void
2709aarch64_emit_const (LONGEST num)
2710{
2711 uint32_t buf[16];
2712 uint32_t *p = buf;
2713
2714 p += emit_mov_addr (p, x0, num);
2715
2716 emit_ops_insns (buf, p - buf);
2717}
2718
2719/* Implementation of emit_ops method "emit_call". */
2720
2721static void
2722aarch64_emit_call (CORE_ADDR fn)
2723{
2724 uint32_t buf[16];
2725 uint32_t *p = buf;
2726
2727 p += emit_mov_addr (p, ip0, fn);
2728 p += emit_blr (p, ip0);
2729
2730 emit_ops_insns (buf, p - buf);
2731}
2732
2733/* Implementation of emit_ops method "emit_reg". */
2734
2735static void
2736aarch64_emit_reg (int reg)
2737{
2738 uint32_t buf[16];
2739 uint32_t *p = buf;
2740
2741 /* Set x0 to unsigned char *regs. */
2742 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2743 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2744 p += emit_mov (p, x1, immediate_operand (reg));
2745
2746 emit_ops_insns (buf, p - buf);
2747
2748 aarch64_emit_call (get_raw_reg_func_addr ());
2749}
2750
2751/* Implementation of emit_ops method "emit_pop". */
2752
2753static void
2754aarch64_emit_pop (void)
2755{
2756 uint32_t buf[16];
2757 uint32_t *p = buf;
2758
2759 p += emit_pop (p, x0);
2760
2761 emit_ops_insns (buf, p - buf);
2762}
2763
2764/* Implementation of emit_ops method "emit_stack_flush". */
2765
2766static void
2767aarch64_emit_stack_flush (void)
2768{
2769 uint32_t buf[16];
2770 uint32_t *p = buf;
2771
2772 p += emit_push (p, x0);
2773
2774 emit_ops_insns (buf, p - buf);
2775}
2776
2777/* Implementation of emit_ops method "emit_zero_ext". */
2778
2779static void
2780aarch64_emit_zero_ext (int arg)
2781{
2782 uint32_t buf[16];
2783 uint32_t *p = buf;
2784
2785 p += emit_ubfx (p, x0, x0, 0, arg);
2786
2787 emit_ops_insns (buf, p - buf);
2788}
2789
2790/* Implementation of emit_ops method "emit_swap". */
2791
2792static void
2793aarch64_emit_swap (void)
2794{
2795 uint32_t buf[16];
2796 uint32_t *p = buf;
2797
2798 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2799 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2800 p += emit_mov (p, x0, register_operand (x1));
2801
2802 emit_ops_insns (buf, p - buf);
2803}
2804
2805/* Implementation of emit_ops method "emit_stack_adjust". */
2806
2807static void
2808aarch64_emit_stack_adjust (int n)
2809{
2810 /* This is not needed with our design. */
2811 uint32_t buf[16];
2812 uint32_t *p = buf;
2813
2814 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2815
2816 emit_ops_insns (buf, p - buf);
2817}
2818
2819/* Implementation of emit_ops method "emit_int_call_1". */
2820
2821static void
2822aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2823{
2824 uint32_t buf[16];
2825 uint32_t *p = buf;
2826
2827 p += emit_mov (p, x0, immediate_operand (arg1));
2828
2829 emit_ops_insns (buf, p - buf);
2830
2831 aarch64_emit_call (fn);
2832}
2833
2834/* Implementation of emit_ops method "emit_void_call_2". */
2835
2836static void
2837aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2838{
2839 uint32_t buf[16];
2840 uint32_t *p = buf;
2841
2842 /* Push x0 on the stack. */
2843 aarch64_emit_stack_flush ();
2844
2845 /* Setup arguments for the function call:
2846
2847 x0: arg1
2848 x1: top of the stack
2849
2850 MOV x1, x0
2851 MOV x0, #arg1 */
2852
2853 p += emit_mov (p, x1, register_operand (x0));
2854 p += emit_mov (p, x0, immediate_operand (arg1));
2855
2856 emit_ops_insns (buf, p - buf);
2857
2858 aarch64_emit_call (fn);
2859
2860 /* Restore x0. */
2861 aarch64_emit_pop ();
2862}
2863
2864/* Implementation of emit_ops method "emit_eq_goto". */
2865
2866static void
2867aarch64_emit_eq_goto (int *offset_p, int *size_p)
2868{
2869 uint32_t buf[16];
2870 uint32_t *p = buf;
2871
2872 p += emit_pop (p, x1);
2873 p += emit_cmp (p, x1, register_operand (x0));
2874 /* Branch over the next instruction if x0 != x1. */
2875 p += emit_bcond (p, NE, 8);
2876 /* The NOP instruction will be patched with an unconditional branch. */
2877 if (offset_p)
2878 *offset_p = (p - buf) * 4;
2879 if (size_p)
2880 *size_p = 4;
2881 p += emit_nop (p);
2882
2883 emit_ops_insns (buf, p - buf);
2884}
2885
2886/* Implementation of emit_ops method "emit_ne_goto". */
2887
2888static void
2889aarch64_emit_ne_goto (int *offset_p, int *size_p)
2890{
2891 uint32_t buf[16];
2892 uint32_t *p = buf;
2893
2894 p += emit_pop (p, x1);
2895 p += emit_cmp (p, x1, register_operand (x0));
2896 /* Branch over the next instruction if x0 == x1. */
2897 p += emit_bcond (p, EQ, 8);
2898 /* The NOP instruction will be patched with an unconditional branch. */
2899 if (offset_p)
2900 *offset_p = (p - buf) * 4;
2901 if (size_p)
2902 *size_p = 4;
2903 p += emit_nop (p);
2904
2905 emit_ops_insns (buf, p - buf);
2906}
2907
2908/* Implementation of emit_ops method "emit_lt_goto". */
2909
2910static void
2911aarch64_emit_lt_goto (int *offset_p, int *size_p)
2912{
2913 uint32_t buf[16];
2914 uint32_t *p = buf;
2915
2916 p += emit_pop (p, x1);
2917 p += emit_cmp (p, x1, register_operand (x0));
 2918	  /* Branch over the next instruction if x1 >= x0.  */
2919 p += emit_bcond (p, GE, 8);
2920 /* The NOP instruction will be patched with an unconditional branch. */
2921 if (offset_p)
2922 *offset_p = (p - buf) * 4;
2923 if (size_p)
2924 *size_p = 4;
2925 p += emit_nop (p);
2926
2927 emit_ops_insns (buf, p - buf);
2928}
2929
2930/* Implementation of emit_ops method "emit_le_goto". */
2931
2932static void
2933aarch64_emit_le_goto (int *offset_p, int *size_p)
2934{
2935 uint32_t buf[16];
2936 uint32_t *p = buf;
2937
2938 p += emit_pop (p, x1);
2939 p += emit_cmp (p, x1, register_operand (x0));
 2940	  /* Branch over the next instruction if x1 > x0.  */
2941 p += emit_bcond (p, GT, 8);
2942 /* The NOP instruction will be patched with an unconditional branch. */
2943 if (offset_p)
2944 *offset_p = (p - buf) * 4;
2945 if (size_p)
2946 *size_p = 4;
2947 p += emit_nop (p);
2948
2949 emit_ops_insns (buf, p - buf);
2950}
2951
2952/* Implementation of emit_ops method "emit_gt_goto". */
2953
2954static void
2955aarch64_emit_gt_goto (int *offset_p, int *size_p)
2956{
2957 uint32_t buf[16];
2958 uint32_t *p = buf;
2959
2960 p += emit_pop (p, x1);
2961 p += emit_cmp (p, x1, register_operand (x0));
 2962	  /* Branch over the next instruction if x1 <= x0.  */
2963 p += emit_bcond (p, LE, 8);
2964 /* The NOP instruction will be patched with an unconditional branch. */
2965 if (offset_p)
2966 *offset_p = (p - buf) * 4;
2967 if (size_p)
2968 *size_p = 4;
2969 p += emit_nop (p);
2970
2971 emit_ops_insns (buf, p - buf);
2972}
2973
 2974	/* Implementation of emit_ops method "emit_ge_goto".  */
2975
2976static void
 2977	aarch64_emit_ge_goto (int *offset_p, int *size_p)
2978{
2979 uint32_t buf[16];
2980 uint32_t *p = buf;
2981
2982 p += emit_pop (p, x1);
2983 p += emit_cmp (p, x1, register_operand (x0));
 2984	  /* Branch over the next instruction if x1 < x0.  */
2985 p += emit_bcond (p, LT, 8);
2986 /* The NOP instruction will be patched with an unconditional branch. */
2987 if (offset_p)
2988 *offset_p = (p - buf) * 4;
2989 if (size_p)
2990 *size_p = 4;
2991 p += emit_nop (p);
2992
2993 emit_ops_insns (buf, p - buf);
2994}
2995
2996static struct emit_ops aarch64_emit_ops_impl =
2997{
2998 aarch64_emit_prologue,
2999 aarch64_emit_epilogue,
3000 aarch64_emit_add,
3001 aarch64_emit_sub,
3002 aarch64_emit_mul,
3003 aarch64_emit_lsh,
3004 aarch64_emit_rsh_signed,
3005 aarch64_emit_rsh_unsigned,
3006 aarch64_emit_ext,
3007 aarch64_emit_log_not,
3008 aarch64_emit_bit_and,
3009 aarch64_emit_bit_or,
3010 aarch64_emit_bit_xor,
3011 aarch64_emit_bit_not,
3012 aarch64_emit_equal,
3013 aarch64_emit_less_signed,
3014 aarch64_emit_less_unsigned,
3015 aarch64_emit_ref,
3016 aarch64_emit_if_goto,
3017 aarch64_emit_goto,
3018 aarch64_write_goto_address,
3019 aarch64_emit_const,
3020 aarch64_emit_call,
3021 aarch64_emit_reg,
3022 aarch64_emit_pop,
3023 aarch64_emit_stack_flush,
3024 aarch64_emit_zero_ext,
3025 aarch64_emit_swap,
3026 aarch64_emit_stack_adjust,
3027 aarch64_emit_int_call_1,
3028 aarch64_emit_void_call_2,
3029 aarch64_emit_eq_goto,
3030 aarch64_emit_ne_goto,
3031 aarch64_emit_lt_goto,
3032 aarch64_emit_le_goto,
3033 aarch64_emit_gt_goto,
 3034	  aarch64_emit_ge_goto,
3035};
3036
3037/* Implementation of linux_target_ops method "emit_ops". */
3038
3039static struct emit_ops *
3040aarch64_emit_ops (void)
3041{
3042 return &aarch64_emit_ops_impl;
3043}
3044
3045/* Implementation of linux_target_ops method
3046 "get_min_fast_tracepoint_insn_len". */
3047
3048static int
3049aarch64_get_min_fast_tracepoint_insn_len (void)
3050{
3051 return 4;
3052}
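/* AArch64 instructions are a fixed 4 bytes, and the jump into the pad
   is the single B written through JJUMP_PAD_INSN above, so any
   instruction site is large enough for a fast tracepoint.  */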
3053
3054/* Implementation of linux_target_ops method "supports_range_stepping". */
3055
3056static int
3057aarch64_supports_range_stepping (void)
3058{
3059 return 1;
3060}
3061
3ca4edb6 3062/* Implementation of target ops method "sw_breakpoint_from_kind". */
dd373349 3063
3064const gdb_byte *
3065aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
dd373349 3066{
3067 if (is_64bit_tdesc ())
3068 {
3069 *size = aarch64_breakpoint_len;
3070 return aarch64_breakpoint;
3071 }
3072 else
3073 return arm_sw_breakpoint_from_kind (kind, size);
3074}
3075
06250e4e 3076/* Implementation of target ops method "breakpoint_kind_from_pc". */
17b1509a 3077
3078int
3079aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3080{
3081 if (is_64bit_tdesc ())
3082 return aarch64_breakpoint_len;
3083 else
3084 return arm_breakpoint_kind_from_pc (pcptr);
3085}
3086
06250e4e 3087/* Implementation of the target ops method
3088 "breakpoint_kind_from_current_state". */
3089
3090int
3091aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3092{
3093 if (is_64bit_tdesc ())
3094 return aarch64_breakpoint_len;
3095 else
3096 return arm_breakpoint_kind_from_current_state (pcptr);
3097}
3098
3099/* Support for hardware single step. */
3100
3101static int
3102aarch64_supports_hardware_single_step (void)
3103{
3104 return 1;
3105}
3106
3107struct linux_target_ops the_low_target =
3108{
3109 aarch64_insert_point,
3110 aarch64_remove_point,
3111 aarch64_stopped_by_watchpoint,
3112 aarch64_stopped_data_address,
3113 NULL, /* collect_ptrace_register */
3114 NULL, /* supply_ptrace_register */
ade90bde 3115 aarch64_linux_siginfo_fixup,
176eb98c 3116 aarch64_linux_new_process,
04ec7890 3117 aarch64_linux_delete_process,
176eb98c 3118 aarch64_linux_new_thread,
466eecee 3119 aarch64_linux_delete_thread,
3a8a0396 3120 aarch64_linux_new_fork,
176eb98c 3121 aarch64_linux_prepare_to_resume,
421530db 3122 NULL, /* process_qsupported */
7671bf47 3123 aarch64_supports_tracepoints,
3124 aarch64_get_thread_area,
3125 aarch64_install_fast_tracepoint_jump_pad,
afbe19f8 3126 aarch64_emit_ops,
bb903df0 3127 aarch64_get_min_fast_tracepoint_insn_len,
d1d0aea1 3128 aarch64_supports_range_stepping,
7d00775e 3129 aarch64_supports_hardware_single_step,
061fc021 3130 aarch64_get_syscall_trapinfo,
176eb98c 3131};
3aee8918 3132
3133/* The linux target ops object. */
3134
3135linux_process_target *the_linux_target = &the_aarch64_target;
3136
3137void
3138initialize_low_arch (void)
3139{
3140 initialize_low_arch_aarch32 ();
3141
3aee8918 3142 initialize_regsets_info (&aarch64_regsets_info);
02895270 3143 initialize_regsets_info (&aarch64_sve_regsets_info);
3aee8918 3144}