gdbserver/linux-low: turn 'get_thread_area' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}
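
/* Implementation of linux target ops method "low_prepare_to_resume".  */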

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache's target description contains the SVE
   feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
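
/* Collect the general-purpose registers from REGCACHE into the ptrace
   buffer BUF.  */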

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
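
/* Supply the general-purpose registers in the ptrace buffer BUF to
   REGCACHE.  */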

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
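
/* Collect the floating-point/SIMD registers from REGCACHE into the
   ptrace buffer BUF.  */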

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
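
/* Supply the floating-point/SIMD registers in the ptrace buffer BUF
   to REGCACHE.  */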

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
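
/* Implementation of linux target ops method "low_supports_breakpoints".  */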

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
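
/* Initialize STATE, clearing all hardware breakpoint and watchpoint
   mirrors and reference counts.  */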

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}
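
/* Implementation of linux target ops method "low_new_thread".  */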

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}
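
/* Implementation of linux target ops method "low_delete_thread".  */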

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
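
/* Register sets accessed via PTRACE_GETREGSET/PTRACE_SETREGSET for the
   standard (non-SVE) AArch64 target description.  */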

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
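
/* Register sets used when the target description includes the SVE
   feature.  */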

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
7671bf47 723
47f70aa7
TBA
724bool
725aarch64_target::supports_tracepoints ()
7671bf47 726{
524b57e6 727 if (current_thread == NULL)
47f70aa7 728 return true;
524b57e6
YQ
729 else
730 {
731 /* We don't support tracepoints on aarch32 now. */
732 return is_64bit_tdesc ();
733 }
7671bf47
PL
734}
735
13e567af 736/* Implementation of linux target ops method "low_get_thread_area". */
bb903df0 737
13e567af
TBA
738int
739aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
bb903df0
PL
740{
741 struct iovec iovec;
742 uint64_t reg;
743
744 iovec.iov_base = &reg;
745 iovec.iov_len = sizeof (reg);
746
747 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
748 return -1;
749
750 *addrp = reg;
751
752 return 0;
753}
754

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
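
/* Helper function emitting a load or store pair instruction.  */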

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits << 0).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register;
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
0badd99f 1840/* Implementation of aarch64_insn_visitor method "tb". */
bb903df0 1841
0badd99f
YQ
1842static void
1843aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1844 const unsigned rt, unsigned bit,
1845 struct aarch64_insn_data *data)
1846{
1847 struct aarch64_insn_relocation_data *insn_reloc
1848 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1849 int64_t new_offset
0badd99f
YQ
1850 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1851
1852 if (can_encode_int32 (new_offset, 16))
1853 {
1854 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1855 aarch64_register (rt, 1), new_offset);
bb903df0 1856 }
0badd99f 1857 else if (can_encode_int32 (new_offset, 28))
bb903df0 1858 {
0badd99f
YQ
1859 /* The offset is out of range for a test bit and branch
1860 instruction but not for a unconditional branch. We can use
1861 the following instructions instead:
1862
1863 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1864 B NOT_TAKEN ; Else jump over TAKEN and continue.
1865 TAKEN:
1866 B #(offset - 8)
1867 NOT_TAKEN:
1868
1869 */
1870 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1871 aarch64_register (rt, 1), 8);
1872 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1873 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1874 new_offset - 8);
1875 }
1876}
bb903df0 1877
0badd99f 1878/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1879
0badd99f
YQ
1880static void
1881aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1882 const int is_adrp,
1883 struct aarch64_insn_data *data)
1884{
1885 struct aarch64_insn_relocation_data *insn_reloc
1886 = (struct aarch64_insn_relocation_data *) data;
1887 /* We know exactly the address the ADR{P,} instruction will compute.
1888 We can just write it to the destination register. */
1889 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1890
0badd99f
YQ
1891 if (is_adrp)
1892 {
1893 /* Clear the lower 12 bits of the offset to get the 4K page. */
1894 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1895 aarch64_register (rd, 1),
1896 address & ~0xfff);
1897 }
1898 else
1899 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1900 aarch64_register (rd, 1), address);
1901}
bb903df0 1902
0badd99f 1903/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1904
0badd99f
YQ
1905static void
1906aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1907 const unsigned rt, const int is64,
1908 struct aarch64_insn_data *data)
1909{
1910 struct aarch64_insn_relocation_data *insn_reloc
1911 = (struct aarch64_insn_relocation_data *) data;
1912 CORE_ADDR address = data->insn_addr + offset;
1913
1914 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1915 aarch64_register (rt, 1), address);
1916
1917 /* We know exactly what address to load from, and what register we
1918 can use:
1919
1920 MOV xd, #(oldloc + offset)
1921 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1922 ...
1923
1924 LDR xd, [xd] ; or LDRSW xd, [xd]
1925
1926 */
1927
1928 if (is_sw)
1929 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1930 aarch64_register (rt, 1),
1931 aarch64_register (rt, 1),
1932 offset_memory_operand (0));
bb903df0 1933 else
0badd99f
YQ
1934 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1935 aarch64_register (rt, is64),
1936 aarch64_register (rt, 1),
1937 offset_memory_operand (0));
1938}
1939
1940/* Implementation of aarch64_insn_visitor method "others". */
1941
1942static void
1943aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1944 struct aarch64_insn_data *data)
1945{
1946 struct aarch64_insn_relocation_data *insn_reloc
1947 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1948
0badd99f
YQ
1949 /* The instruction is not PC relative. Just re-emit it at the new
1950 location. */
e1c587c3 1951 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
0badd99f
YQ
1952}
1953
1954static const struct aarch64_insn_visitor visitor =
1955{
1956 aarch64_ftrace_insn_reloc_b,
1957 aarch64_ftrace_insn_reloc_b_cond,
1958 aarch64_ftrace_insn_reloc_cb,
1959 aarch64_ftrace_insn_reloc_tb,
1960 aarch64_ftrace_insn_reloc_adr,
1961 aarch64_ftrace_insn_reloc_ldr_literal,
1962 aarch64_ftrace_insn_reloc_others,
1963};
1964
bb903df0
PL
1965/* Implementation of linux_target_ops method
1966 "install_fast_tracepoint_jump_pad". */
1967
1968static int
1969aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1970 CORE_ADDR tpaddr,
1971 CORE_ADDR collector,
1972 CORE_ADDR lockaddr,
1973 ULONGEST orig_size,
1974 CORE_ADDR *jump_entry,
1975 CORE_ADDR *trampoline,
1976 ULONGEST *trampoline_size,
1977 unsigned char *jjump_pad_insn,
1978 ULONGEST *jjump_pad_insn_size,
1979 CORE_ADDR *adjusted_insn_addr,
1980 CORE_ADDR *adjusted_insn_addr_end,
1981 char *err)
1982{
1983 uint32_t buf[256];
1984 uint32_t *p = buf;
2ac09a5b 1985 int64_t offset;
bb903df0 1986 int i;
70b439f0 1987 uint32_t insn;
bb903df0 1988 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1989 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128-bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                    | struct tracepoint *   |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
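
  /* Editor's note: a quick size check of the frame built below, under the
     layout above: 32 SIMD&FP cells + 31 GPR cells + 5 special-purpose
     cells + 1 collecting_t cell = 69 cells * 16 bytes = 1104 bytes.  The
     saved SP value is reconstructed as SP + (32 + 31 + 5) * 16 before the
     collecting_t object is pushed, i.e. the SP the inferior had on entry
     to the jump pad.  */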

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));


  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
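
  /* Editor's note: a rough C equivalent of the lock acquisition above,
     using the GCC/Clang atomic builtins (a sketch for illustration only;
     the jump pad has to stay in assembly because it may not touch the
     stack or clobber registers beyond what it has saved):

       void acquire (uintptr_t *lock, uintptr_t self)  // hypothetical
       {
         uintptr_t expected;
         do
           expected = 0;
         while (!__atomic_compare_exchange_n (lock, &expected, self, 0,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
       }

     The LDAXR/STXR pair plays the role of the compare-and-swap, and the
     SEVL/WFE pair avoids spinning at full speed while another thread
     holds the lock.  */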

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are done
     ; before this instruction is executed.  Furthermore, this instruction
     ; will trigger an event, letting other threads know they can grab
     ; the lock.
     STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }
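
  /* Editor's note: a 28-bit signed byte offset spans +/-2^27 bytes, i.e.
     +/-128 MiB, so this branch back (and the branch into the pad emitted
     below) requires the jump pad to be allocated within 128 MiB of the
     tracepoint address.  */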

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is where the result of evaluating the expression is stored;
     it will be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it is
     not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));


  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
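
/* Editor's note: a worked example of the CMP/CSET idiom above.  With
   x0 = 0, CMP sets the Z flag, so CSET x0, EQ writes 1; with x0 = 5, Z is
   clear and CSET writes 0.  That is exactly the C expression x0 = !x0.  */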

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
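
/* Editor's note: a sketch of how the two methods above cooperate, with
   hypothetical names.  emit_if_goto/emit_goto report the byte offset of
   their placeholder NOP through *offset_p; once the branch target is
   known, the caller patches that slot:

     int off, size;
     CORE_ADDR insn_start = current_insn_ptr;  // before emitting
     aarch64_emit_if_goto (&off, &size);
     ...                                       // emit the skipped body
     aarch64_write_goto_address (insn_start + off, current_insn_ptr, size);

   which overwrites the NOP with B #(to - from).  The actual bookkeeping
   is done by the generic bytecode compiler in gdbserver.  */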

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up the arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
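
/* Editor's note: a sketch of how the emit_ops table above gets used, with
   a hypothetical agent expression.  Compiling something like "$reg0 + 1"
   would drive roughly this sequence of calls, each appending instructions
   at current_insn_ptr:

     aarch64_emit_prologue ();     // set up the frame, x0 = top of stack
     aarch64_emit_reg (0);         // x0 = value of register 0
     aarch64_emit_stack_flush ();  // push x0
     aarch64_emit_const (1);       // x0 = 1
     aarch64_emit_add ();          // pop x1; x0 = x1 + x0
     aarch64_emit_epilogue ();     // store x0 to *value and return

   The exact call sequence is decided by the generic bytecode compiler in
   gdbserver, not by this file.  */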

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}