gdbserver/linux-low: turn fast tracepoint ops into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description contains the SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
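
/* For reference: read as a little-endian 32-bit word these bytes are
   0xd4200000, i.e. "brk #0"; low_breakpoint_at below recognizes a
   planted breakpoint by comparing memory against this pattern.  */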

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
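
/* These cases correspond to the RSP Z packets: Z0 (software
   breakpoint), Z1 (hardware breakpoint), Z2 (write watchpoint),
   Z3 (read watchpoint) and Z4 (access watchpoint); this target
   accepts them all.  */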

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		       |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*            op0           op1          crn          crm         op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
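
/* For illustration, assuming the BLR opcode value 0xd63f0000 from
   arch/aarch64-insn.h: emit_blr (p, ip0) writes the single word
   0xd63f0200, i.e. "blr x16".  */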

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
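
/* For illustration: emit_stp (p, fp, lr, sp, preindex_memory_operand (-16))
   assembles to "stp x29, x30, [sp, #-16]!" (0xa9bf7bfd), the usual frame
   store; the index -16 is carried in the 7-bit imm7 field as -16 >> 3.  */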

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
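
/* Taken together, the exclusive-access emitters above are what the fast
   tracepoint jump pad uses to serialize threads entering the collector.
   A sketch of the usual AArch64 lock sequence (not a verbatim quote of
   the generated code):

       sevl
    1: wfe
       ldaxr  x1, [x0]        ; x0 = lock address
       cbnz   x1, 1b          ; lock is held, wait for an event
       stxr   w1, x2, [x0]    ; try to claim it for this thread
       cbnz   w1, 1b          ; exclusive store failed, retry
       ...                    ; collect the tracepoint data
       stlr   xzr, [x0]       ; release the lock  */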

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
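
/* For example, emit_mov_addr (p, x0, 0x4000123456) stops after the
   highest non-zero 16-bit chunk and emits three instructions:

       mov  x0, #0x3456
       movk x0, #0x0012, lsl #16
       movk x0, #0x0040, lsl #32

   returning 3, the number of 32-bit words written to P.  */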

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
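
/* For illustration: emit_mrs (p, x0, TPIDR_EL0) packs the 15-bit system
   register field from the enum above into bits 5..19, yielding the word
   0xd53bd040, i.e. "mrs x0, tpidr_el0" (assuming the MRS opcode value
   0xd5300000 from arch/aarch64-insn.h).  */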

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
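
/* For example, emit_ubfx (p, x0, x1, 16, 16) extracts bits 16..31 of x1
   into x0: "ubfx x0, x1, #16, #16", which is encoded here as
   "ubfm x0, x1, #16, #31".  */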

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
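
/* For example, emit_cset (p, x0, EQ) materializes the Z flag: EQ is 0x0,
   so the helper emits CSINC x0, xzr, xzr, NE, leaving 1 in x0 when the
   condition EQ holds and 0 otherwise.  */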

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation during fast tracepoint insertion.  The
   visitor relocates an instruction from BASE.INSN_ADDR to NEW_ADDR
   and saves the relocated instruction(s) in the buffer pointed to by
   INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
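
/* For example, relocating "b #0x40" from insn_addr 0x400000 to a jump
   pad at new_addr 0x7f0000 gives new_offset = 0x400000 - 0x7f0000 + 0x40
   = -0x3efc0, so the copied branch still reaches the original target
   0x400040.  If new_offset does not fit in B's signed 28-bit byte
   offset, nothing is emitted and the instruction cannot be relocated
   this way.  */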
1779/* Implementation of aarch64_insn_visitor method "b_cond". */
1780
1781static void
1782aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1783 struct aarch64_insn_data *data)
1784{
1785 struct aarch64_insn_relocation_data *insn_reloc
1786 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1787 int64_t new_offset
0badd99f
YQ
1788 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1789
1790 if (can_encode_int32 (new_offset, 21))
1791 {
1792 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1793 new_offset);
bb903df0 1794 }
0badd99f 1795 else if (can_encode_int32 (new_offset, 28))
bb903df0 1796 {
0badd99f
YQ
1797 /* The offset is out of range for a conditional branch
1798 instruction but not for a unconditional branch. We can use
1799 the following instructions instead:
bb903df0 1800
0badd99f
YQ
1801 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1802 B NOT_TAKEN ; Else jump over TAKEN and continue.
1803 TAKEN:
1804 B #(offset - 8)
1805 NOT_TAKEN:
1806
1807 */
1808
1809 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1810 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1811 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1812 }
0badd99f 1813}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN           ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
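
/* A sketch of how the visitor is driven, mirroring the call site in
   install_fast_tracepoint_jump_pad below (INSN, TPADDR, NEW_ADDR and
   BUF stand for the caller's values):

     struct aarch64_insn_relocation_data data;

     data.base.insn_addr = tpaddr;
     data.new_addr = new_addr;
     data.insn_ptr = buf;
     aarch64_relocate_instruction (insn, &visitor,
                                   (struct aarch64_insn_data *) &data);
*/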

bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}

/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
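
  /* As a hypothetical C-level view (the type below is invented for
     illustration; the layout consumer is aarch64_ft_collect_regmap in
     linux-aarch64-ipa.c), the block passed to the collector at SP + 16
     has this shape:

       struct aarch64_ft_regblock
       {
         uint64_t fpcr[2];    // each cell is 16 bytes wide
         uint64_t fpsr[2];
         uint64_t cpsr[2];    // only the NZCV bits are meaningful
         uint64_t pc[2];
         uint64_t sp[2];
         uint64_t x[31][2];   // x0 .. x30
         uint64_t q[32][2];   // q0 .. q31, 128 bits each
       };
  */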

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
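
  /* In effect the stores above build a two-word object at the new top
     of the stack; in C terms it is roughly (field names illustrative,
     the real definition lives in the in-process agent's tracepoint
     code):

       struct collecting_t { uintptr_t tpoint; uintptr_t thread_area; };
  */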

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core, so that the following WFE
     ; instruction does not wait.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
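
  /* In C11 atomics, the acquire loop above is roughly equivalent to
     this sketch (illustrative only; the jump pad emits raw
     instructions and cannot call into libc.  LOCK stands for the word
     at lockaddr and OBJ for this thread's collecting_t object):

       uintptr_t expected;
       do
         expected = 0;
       while (!atomic_compare_exchange_weak_explicit
                (&lock, &expected, (uintptr_t) &obj,
                 memory_order_acquire, memory_order_relaxed));

     The SEVL/WFE pair merely keeps the core in a low-power state while
     it spins; it does not affect correctness.  */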

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are done
     ; before this instruction is executed.  Furthermore, this instruction
     ; will trigger an event, letting other threads know they can grab
     ; the lock.
     STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);
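
  /* In the same C11 sketch as above, this release is
     atomic_store_explicit (&lock, 0, memory_order_release); the
     store-release is also what generates the event the WFE in the
     acquire loop waits for.  */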

  /* Free collecting_t object:

     ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument points to the result of evaluating the expression, which
     is set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
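
/* For example, with the conventions set up by the prologue (x0 caches
   the top of the stack), the "add" opcode expands to:

     LDR x1, [sp], #16   ; emit_pop: x1 = second operand
     ADD x0, x1, x0      ; new top of stack = x1 + x0
*/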

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
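
/* For example, when aarch64_emit_goto above has left a NOP at FROM, the
   bytecode compiler later resolves the jump target TO and calls
   aarch64_write_goto_address (from, to, 4), overwriting the NOP with
   "B #(to - from)".  */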

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  A fast tracepoint is installed
   by overwriting a single instruction with a branch into the jump pad,
   so the minimum is one 4-byte instruction.  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_emit_ops,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}