/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "arch/aarch64-mte-linux.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-mte-linux-ptrace.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#ifdef HAVE_GETAUXVAL
#include <sys/auxv.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_memory_tagging () override;

  bool fetch_memtags (CORE_ADDR address, size_t len,
		      gdb::byte_vector &tags, int type) override;

  bool store_memtags (CORE_ADDR address, size_t len,
		      const gdb::byte_vector &tags, int type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

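/* AArch64/Linux accesses all registers through regsets (see
   regs_info_aarch64 below, which has no usrregs), so the following two
   ops should be unreachable; they simply assert.  */
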
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

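/* Implementation of linux target ops method "low_prepare_to_resume".  */
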
void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

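/* Fill BUF, which points to a struct user_pt_regs, with the general
   purpose registers from REGCACHE.  */
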
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

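/* Store the general purpose registers in BUF, which points to a struct
   user_pt_regs, into REGCACHE.  */
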
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

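/* Fill BUF, which points to a struct user_fpsimd_state, with the FP/SIMD
   registers from REGCACHE.  */
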
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

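/* Store the FP/SIMD registers in BUF, which points to a struct
   user_fpsimd_state, into REGCACHE.  */
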
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Fill BUF with the MTE registers from the regcache.  */

static void
aarch64_fill_mteregset (struct regcache *regcache, void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  collect_register (regcache, mte_base, mte_regset);
}

/* Store the MTE registers to regcache.  */

static void
aarch64_store_mteregset (struct regcache *regcache, const void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  /* Tag Control register.  */
  supply_register (regcache, mte_base, mte_regset);
}

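/* Implementation of linux target ops method "low_supports_breakpoints".  */
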
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

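/* Initialize the hardware breakpoint/watchpoint mirror STATE so that no
   debug registers are recorded as being in use.  */
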
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Return the address only having significant bits.  This is used to ignore
   the top byte (TBI).  */

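/* For example, with an 8-bit tag of 0xf8 in the top byte,
   address_significant (0xf8ffffabcdef1234) yields 0xffffffabcdef1234:
   the tag byte is discarded and bit 55 is sign-extended upwards.  */
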
static CORE_ADDR
address_significant (CORE_ADDR addr)
{
  /* Clear insignificant bits of a target address and sign extend resulting
     address.  */
  int addr_bit = 56;

  CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);
  addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
  addr = (addr ^ sign) - sign;

  return addr;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Make sure to ignore the top byte, otherwise we may not recognize a
     hardware watchpoint hit.  The stopped data addresses coming from the
     kernel can potentially be tagged addresses.  */
  const CORE_ADDR addr_trap
    = address_significant ((CORE_ADDR) siginfo.si_addr);

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
	     |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

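/* Implementation of linux target ops method "low_new_thread".  */
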
void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

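/* Implementation of linux target ops method "low_delete_thread".  */
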
void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

/* Array containing all the possible register sets for AArch64/Linux.  During
   architecture setup, these will be checked against the HWCAP/HWCAP2 bits for
   validity and enabled/disabled accordingly.

   Their sizes are set to 0 here, but they will be adjusted later depending
   on whether each register set is available or not.  */
static struct regset_info aarch64_regsets[] =
{
  /* GPR registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    0, GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  /* Floating Point (FPU) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    0, FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  /* Scalable Vector Extension (SVE) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    0, EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  /* PAC registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    0, OPTIONAL_REGS,
    nullptr, aarch64_store_pauthregset },
  /* Tagged address control / MTE registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
    0, OPTIONAL_REGS,
    aarch64_fill_mteregset, aarch64_store_mteregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    nullptr, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    nullptr, /* regset_bitmap */
    nullptr, /* usrregs */
    &aarch64_regsets_info,
  };

/* Given FEATURES, adjust the available register sets by setting their
   sizes.  A size of 0 means the register set is disabled and won't be
   used.  */

static void
aarch64_adjust_register_sets (const struct aarch64_features &features)
{
  struct regset_info *regset;

  for (regset = aarch64_regsets; regset->size >= 0; regset++)
    {
      switch (regset->nt_type)
	{
	case NT_PRSTATUS:
	  /* General purpose registers are always present.  */
	  regset->size = sizeof (struct user_pt_regs);
	  break;
	case NT_FPREGSET:
	  /* This is unavailable when SVE is present.  */
	  if (!features.sve)
	    regset->size = sizeof (struct user_fpsimd_state);
	  break;
	case NT_ARM_SVE:
	  if (features.sve)
	    regset->size = SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE);
	  break;
	case NT_ARM_PAC_MASK:
	  if (features.pauth)
	    regset->size = AARCH64_PAUTH_REGS_SIZE;
	  break;
	case NT_ARM_TAGGED_ADDR_CTRL:
	  if (features.mte)
	    regset->size = AARCH64_LINUX_SIZEOF_MTE;
	  break;
	default:
	  gdb_assert_not_reached ("Unknown register set found.");
	}
    }
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      struct aarch64_features features;

      uint64_t vq = aarch64_sve_get_vq (tid);
      features.sve = (vq > 0);
      /* A-profile PAC is 64-bit only.  */
      features.pauth = linux_get_hwcap (8) & AARCH64_HWCAP_PACA;
      /* A-profile MTE is 64-bit only.  */
      features.mte = linux_get_hwcap2 (8) & HWCAP2_MTE;

      current_process ()->tdesc
	= aarch64_linux_read_description (vq, features.pauth, features.mte);

      /* Adjust the register sets we should use for this particular set of
	 features.  */
      aarch64_adjust_register_sets (features);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  /* AArch64 64-bit registers.  */
  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

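/* Implementation of linux target ops method "low_supports_catch_syscall".  */
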
bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

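/* Helper function emitting a load/store pair instruction.  Used by
   emit_stp and emit_ldp below; OPCODE selects between the STP and LDP
   encodings.  */
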
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48 */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN is the input register.
   OPERAND is the second source operand, either of type OPERAND_IMMEDIATE
   or OPERAND_REGISTER.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write a AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write a ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write a ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write a EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write a ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

afbe19f8
PL
1688/* Write a SBFM instruction into *BUF.
1689
1690 SBFM rd, rn, #immr, #imms
1691
1692 This instruction moves the bits from #immr to #imms into the
1693 destination, sign extending the result.
1694
1695 RD is the destination register.
1696 RN is the source register.
1697 IMMR is the bit number to start at (least significant bit).
1698 IMMS is the bit number to stop at (most significant bit). */
1699
1700static int
1701emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1702 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1703{
1704 uint32_t size = ENCODE (rd.is64, 1, 31);
1705 uint32_t n = ENCODE (rd.is64, 1, 22);
1706
e1c587c3
YQ
1707 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1708 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1709 | ENCODE (rd.num, 5, 0));
afbe19f8
PL
1710}
1711
1712/* Write a SBFX instruction into *BUF.
1713
1714 SBFX rd, rn, #lsb, #width
1715
1716 This instruction moves #width bits from #lsb into the destination, sign
1717 extending the result. This is an alias for:
1718
1719 SBFM rd, rn, #lsb, #(lsb + width - 1)
1720
1721 RD is the destination register.
1722 RN is the source register.
1723 LSB is the bit number to start at (least significant bit).
1724 WIDTH is the number of bits to move. */
1725
1726static int
1727emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1728 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1729{
1730 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1731}
1732
1733/* Write a UBFM instruction into *BUF.
1734
1735 UBFM rd, rn, #immr, #imms
1736
1737 This instruction moves the bits from #immr to #imms into the
1738 destination, extending the result with zeros.
1739
1740 RD is the destination register.
1741 RN is the source register.
1742 IMMR is the bit number to start at (least significant bit).
1743 IMMS is the bit number to stop at (most significant bit). */
1744
1745static int
1746emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1747 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1748{
1749 uint32_t size = ENCODE (rd.is64, 1, 31);
1750 uint32_t n = ENCODE (rd.is64, 1, 22);
1751
e1c587c3
YQ
1752 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1753 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1754 | ENCODE (rd.num, 5, 0));
afbe19f8
PL
1755}
1756
1757/* Write a UBFX instruction into *BUF.
1758
1759 UBFX rd, rn, #lsb, #width
1760
1761 This instruction moves #width bits from #lsb into the destination,
1762 extending the result with zeros. This is an alias for:
1763
1764 UBFM rd, rn, #lsb, #(lsb + width - 1)
1765
1766 RD is the destination register.
1767 RN is the source register.
1768 LSB is the bit number to start at (least significant bit).
1769 WIDTH is the number of bits to move. */
1770
1771static int
1772emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1773 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1774{
1775 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1776}
1777
1778/* Write a CSINC instruction into *BUF.
1779
1780 CSINC rd, rn, rm, cond
1781
1782 This instruction conditionally increments rn or rm and places the result
1783 in rd. rn is chosen is the condition is true.
1784
1785 RD is the destination register.
1786 RN and RM are the source registers.
1787 COND is the encoded condition. */
1788
1789static int
1790emit_csinc (uint32_t *buf, struct aarch64_register rd,
1791 struct aarch64_register rn, struct aarch64_register rm,
1792 unsigned cond)
1793{
1794 uint32_t size = ENCODE (rd.is64, 1, 31);
1795
e1c587c3
YQ
1796 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1797 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1798 | ENCODE (rd.num, 5, 0));
afbe19f8
PL
1799}
1800
1801/* Write a CSET instruction into *BUF.
1802
1803 CSET rd, cond
1804
1805 This instruction conditionally write 1 or 0 in the destination register.
1806 1 is written if the condition is true. This is an alias for:
1807
1808 CSINC rd, xzr, xzr, !cond
1809
1810 Note that the condition needs to be inverted.
1811
1812 RD is the destination register.
1813 RN and RM are the source registers.
1814 COND is the encoded condition. */
1815
1816static int
1817emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1818{
1819 /* The least significant bit of the condition needs toggling in order to
1820 invert it. */
1821 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1822}
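/* For instance, emit_cset (buf, x0, EQ) emits CSINC x0, xzr, xzr, NE:
   when the original EQ condition holds, the inverted NE test fails, so
   x0 receives xzr + 1 = 1; otherwise x0 receives xzr = 0.  */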
1823
1824/* Write LEN instructions from BUF into the inferior memory at *TO.
1825
1826 Note instructions are always little endian on AArch64, unlike data. */
1827
1828static void
1829append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1830{
1831 size_t byte_len = len * sizeof (uint32_t);
1832#if (__BYTE_ORDER == __BIG_ENDIAN)
1833 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1834 size_t i;
1835
1836 for (i = 0; i < len; i++)
1837 le_buf[i] = htole32 (buf[i]);
1838
1839 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1840
1841 xfree (le_buf);
1842#else
1843 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1844#endif
1845
1846 *to += byte_len;
1847}
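/* A minimal usage sketch (hypothetical; the real call sites below pass
   their own buffers):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_nop (p);
     append_insns (&addr, p - buf, buf);

   after which addr has advanced by 4 bytes.  */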
1848
1849/* Sub-class of struct aarch64_insn_data, storing information about
1850 instruction relocation for fast tracepoints. The visitor can
1851 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1852 the relocated instructions in the buffer pointed to by INSN_PTR. */
1853
1854struct aarch64_insn_relocation_data
1855{
1856 struct aarch64_insn_data base;
1857
1858 /* The new address the instruction is relocated to. */
1859 CORE_ADDR new_addr;
1860 /* Pointer to the buffer of relocated instruction(s). */
1861 uint32_t *insn_ptr;
1862};
1863
1864/* Implementation of aarch64_insn_visitor method "b". */
1865
1866static void
1867aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1868 struct aarch64_insn_data *data)
1869{
1870 struct aarch64_insn_relocation_data *insn_reloc
1871 = (struct aarch64_insn_relocation_data *) data;
1872 int64_t new_offset
1873 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1874
1875 if (can_encode_int32 (new_offset, 28))
1876 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1877}
1878
1879/* Implementation of aarch64_insn_visitor method "b_cond". */
1880
1881static void
1882aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1883 struct aarch64_insn_data *data)
1884{
1885 struct aarch64_insn_relocation_data *insn_reloc
1886 = (struct aarch64_insn_relocation_data *) data;
1887 int64_t new_offset
1888 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1889
1890 if (can_encode_int32 (new_offset, 21))
1891 {
1892 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1893 new_offset);
1894 }
1895 else if (can_encode_int32 (new_offset, 28))
1896 {
1897 /* The offset is out of range for a conditional branch
1898 instruction but not for an unconditional branch. We can use
1899 the following instructions instead:
1900
1901 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1902 B NOT_TAKEN ; Else jump over TAKEN and continue.
1903 TAKEN:
1904 B #(offset - 8)
1905 NOT_TAKEN:
1906
1907 */
1908
1909 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1910 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1911 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
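      /* Worked example with invented numbers: if the original B.COND
	 had offset = +0x110000 and the pad sits 0x1000 bytes past the
	 original instruction, then new_offset = 0x10F000.  That does
	 not fit the 21-bit conditional-branch immediate but does fit
	 28 bits, so the three-instruction sequence above is used; the
	 final B lies 8 bytes into it, hence new_offset - 8.  */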
1912 }
1913}
1914
1915/* Implementation of aarch64_insn_visitor method "cb". */
1916
1917static void
1918aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1919 const unsigned rn, int is64,
1920 struct aarch64_insn_data *data)
1921{
1922 struct aarch64_insn_relocation_data *insn_reloc
1923 = (struct aarch64_insn_relocation_data *) data;
1924 int64_t new_offset
1925 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1926
1927 if (can_encode_int32 (new_offset, 21))
1928 {
1929 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1930 aarch64_register (rn, is64), new_offset);
1931 }
1932 else if (can_encode_int32 (new_offset, 28))
1933 {
1934 /* The offset is out of range for a compare and branch
1935 instruction but not for an unconditional branch. We can use
1936 the following instructions instead:
1937
1938 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1939 B NOT_TAKEN ; Else jump over TAKEN and continue.
1940 TAKEN:
1941 B #(offset - 8)
1942 NOT_TAKEN:
1943
1944 */
1945 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1946 aarch64_register (rn, is64), 8);
1947 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1948 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1949 }
1950}
1951
1952/* Implementation of aarch64_insn_visitor method "tb". */
1953
1954static void
1955aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1956 const unsigned rt, unsigned bit,
1957 struct aarch64_insn_data *data)
1958{
1959 struct aarch64_insn_relocation_data *insn_reloc
1960 = (struct aarch64_insn_relocation_data *) data;
1961 int64_t new_offset
1962 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1963
1964 if (can_encode_int32 (new_offset, 16))
1965 {
1966 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1967 aarch64_register (rt, 1), new_offset);
1968 }
1969 else if (can_encode_int32 (new_offset, 28))
1970 {
1971 /* The offset is out of range for a test bit and branch
1972 instruction but not for an unconditional branch. We can use
1973 the following instructions instead:
1974
1975 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1976 B NOT_TAKEN ; Else jump over TAKEN and continue.
1977 TAKEN:
1978 B #(offset - 8)
1979 NOT_TAKEN:
1980
1981 */
1982 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1983 aarch64_register (rt, 1), 8);
1984 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1985 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1986 new_offset - 8);
1987 }
1988}
1989
1990/* Implementation of aarch64_insn_visitor method "adr". */
1991
1992static void
1993aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1994 const int is_adrp,
1995 struct aarch64_insn_data *data)
1996{
1997 struct aarch64_insn_relocation_data *insn_reloc
1998 = (struct aarch64_insn_relocation_data *) data;
1999 /* We know exactly the address the ADR{P,} instruction will compute.
2000 We can just write it to the destination register. */
2001 CORE_ADDR address = data->insn_addr + offset;
2002
2003 if (is_adrp)
2004 {
2005 /* Clear the lower 12 bits of the address to get its 4K page. */
2006 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2007 aarch64_register (rd, 1),
2008 address & ~0xfff);
2009 }
2010 else
2011 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2012 aarch64_register (rd, 1), address);
2013}
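/* Example with an invented address: for an ADRP at insn_addr = 0x400123
   and a computed page offset of 0x3000, address = 0x403123 and the MOV
   sequence materializes 0x403000, the same page base the original ADRP
   would have produced, independent of where the pad was placed.  */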
2014
2015/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2016
2017static void
2018aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
2019 const unsigned rt, const int is64,
2020 struct aarch64_insn_data *data)
2021{
2022 struct aarch64_insn_relocation_data *insn_reloc
2023 = (struct aarch64_insn_relocation_data *) data;
2024 CORE_ADDR address = data->insn_addr + offset;
2025
2026 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2027 aarch64_register (rt, 1), address);
2028
2029 /* We know exactly what address to load from, and what register we
2030 can use:
2031
2032 MOV xd, #(oldloc + offset)
2033 MOVK xd, #((oldloc + offset) >> 16), lsl #16
2034 ...
2035
2036 LDR xd, [xd] ; or LDRSW xd, [xd]
2037
2038 */
2039
2040 if (is_sw)
2041 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
2042 aarch64_register (rt, 1),
2043 aarch64_register (rt, 1),
2044 offset_memory_operand (0));
2045 else
2046 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
2047 aarch64_register (rt, is64),
2048 aarch64_register (rt, 1),
2049 offset_memory_operand (0));
2050}
2051
2052/* Implementation of aarch64_insn_visitor method "others". */
2053
2054static void
2055aarch64_ftrace_insn_reloc_others (const uint32_t insn,
2056 struct aarch64_insn_data *data)
2057{
2058 struct aarch64_insn_relocation_data *insn_reloc
2059 = (struct aarch64_insn_relocation_data *) data;
2060
2061 /* The instruction is not PC relative. Just re-emit it at the new
2062 location. */
2063 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
2064}
2065
2066static const struct aarch64_insn_visitor visitor =
2067{
2068 aarch64_ftrace_insn_reloc_b,
2069 aarch64_ftrace_insn_reloc_b_cond,
2070 aarch64_ftrace_insn_reloc_cb,
2071 aarch64_ftrace_insn_reloc_tb,
2072 aarch64_ftrace_insn_reloc_adr,
2073 aarch64_ftrace_insn_reloc_ldr_literal,
2074 aarch64_ftrace_insn_reloc_others,
2075};
2076
2077bool
2078aarch64_target::supports_fast_tracepoints ()
2079{
2080 return true;
2081}
2082
2083/* Implementation of target ops method
2084 "install_fast_tracepoint_jump_pad". */
2085
2086int
2087aarch64_target::install_fast_tracepoint_jump_pad
2088 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
2089 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
2090 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
2091 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
2092 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
2093 char *err)
2094{
2095 uint32_t buf[256];
2096 uint32_t *p = buf;
2097 int64_t offset;
2098 int i;
2099 uint32_t insn;
2100 CORE_ADDR buildaddr = *jump_entry;
2101 struct aarch64_insn_relocation_data insn_data;
2102
2103 /* We need to save the current state on the stack both to restore it
2104 later and to collect register values when the tracepoint is hit.
2105
2106 The saved registers are pushed in a layout that needs to be in sync
2107 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2108 the supply_fast_tracepoint_registers function will fill in the
2109 register cache from a pointer to saved registers on the stack we build
2110 here.
2111
2112 For simplicity, we set the size of each cell on the stack to 16 bytes.
2113 This way one cell can hold any register type, from system registers
2114 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
2115 has to be 16 bytes aligned anyway.
2116
2117 Note that the CPSR register does not exist on AArch64. Instead we
2118 can access system bits describing the process state with the
2119 MRS/MSR instructions, namely the condition flags. We save them as
2120 if they are part of a CPSR register because that's how GDB
2121 interprets these system bits. At the moment, only the condition
2122 flags are saved in CPSR (NZCV).
2123
2124 Stack layout, each cell is 16 bytes (descending):
2125
2126 High *-------- SIMD&FP registers from 31 down to 0. --------*
2127 | q31 |
2128 . .
2129 . . 32 cells
2130 . .
2131 | q0 |
2132 *---- General purpose registers from 30 down to 0. ----*
2133 | x30 |
2134 . .
2135 . . 31 cells
2136 . .
2137 | x0 |
2138 *------------- Special purpose registers. -------------*
2139 | SP |
2140 | PC |
2141 | CPSR (NZCV) | 5 cells
2142 | FPSR |
2143 | FPCR | <- SP + 16
2144 *------------- collecting_t object --------------------*
2145 | TPIDR_EL0 | struct tracepoint * |
2146 Low *------------------------------------------------------*
2147
2148 After this stack is set up, we issue a call to the collector, passing
2149 it the saved registers at (SP + 16). */
2150
2151 /* Push SIMD&FP registers on the stack:
2152
2153 SUB sp, sp, #(32 * 16)
2154
2155 STP q30, q31, [sp, #(30 * 16)]
2156 ...
2157 STP q0, q1, [sp]
2158
2159 */
2160 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2161 for (i = 30; i >= 0; i -= 2)
2162 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2163
2164 /* Push general purpose registers on the stack. Note that we do not need
2165 to push x31 as it represents the xzr register and not the stack
2166 pointer in a STR instruction.
2167
2168 SUB sp, sp, #(31 * 16)
2169
2170 STR x30, [sp, #(30 * 16)]
2171 ...
2172 STR x0, [sp]
2173
2174 */
2175 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2176 for (i = 30; i >= 0; i -= 1)
2177 p += emit_str (p, aarch64_register (i, 1), sp,
2178 offset_memory_operand (i * 16));
2179
2180 /* Make space for 5 more cells.
2181
2182 SUB sp, sp, #(5 * 16)
2183
2184 */
2185 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2186
2187
2188 /* Save SP:
2189
2190 ADD x4, sp, #((32 + 31 + 5) * 16)
2191 STR x4, [sp, #(4 * 16)]
2192
2193 */
2194 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2195 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2196
2197 /* Save PC (tracepoint address):
2198
2199 MOV x3, #(tpaddr)
2200 ...
2201
2202 STR x3, [sp, #(3 * 16)]
2203
2204 */
2205
2206 p += emit_mov_addr (p, x3, tpaddr);
2207 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2208
2209 /* Save CPSR (NZCV), FPSR and FPCR:
2210
2211 MRS x2, nzcv
2212 MRS x1, fpsr
2213 MRS x0, fpcr
2214
2215 STR x2, [sp, #(2 * 16)]
2216 STR x1, [sp, #(1 * 16)]
2217 STR x0, [sp, #(0 * 16)]
2218
2219 */
2220 p += emit_mrs (p, x2, NZCV);
2221 p += emit_mrs (p, x1, FPSR);
2222 p += emit_mrs (p, x0, FPCR);
2223 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2224 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2225 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2226
2227 /* Push the collecting_t object. It consists of the address of the
2228 tracepoint and an ID for the current thread. We get the latter by
2229 reading the tpidr_el0 system register. It corresponds to the
2230 NT_ARM_TLS register accessible with ptrace.
2231
2232 MOV x0, #(tpoint)
2233 ...
2234
2235 MRS x1, tpidr_el0
2236
2237 STP x0, x1, [sp, #-16]!
2238
2239 */
2240
2241 p += emit_mov_addr (p, x0, tpoint);
2242 p += emit_mrs (p, x1, TPIDR_EL0);
2243 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2244
2245 /* Spin-lock:
2246
2247 The shared memory for the lock is at lockaddr. It will hold zero
2248 if no-one is holding the lock, otherwise it contains the address of
2249 the collecting_t object on the stack of the thread which acquired it.
2250
2251 At this stage, the stack pointer points to this thread's collecting_t
2252 object.
2253
2254 We use the following registers:
2255 - x0: Address of the lock.
2256 - x1: Pointer to collecting_t object.
2257 - x2: Scratch register.
2258
2259 MOV x0, #(lockaddr)
2260 ...
2261 MOV x1, sp
2262
2263 ; Trigger an event local to this core. So the following WFE
2264 ; instruction is ignored.
2265 SEVL
2266 again:
2267 ; Wait for an event. The event is triggered by either the SEVL
2268 ; or STLR instructions (store release).
2269 WFE
2270
2271 ; Atomically read at lockaddr. This marks the memory location as
2272 ; exclusive. This instruction also has memory constraints which
2273 ; make sure all previous data reads and writes are done before
2274 ; executing it.
2275 LDAXR x2, [x0]
2276
2277 ; Try again if another thread holds the lock.
2278 CBNZ x2, again
2279
2280 ; We can lock it! Write the address of the collecting_t object.
2281 ; This instruction will fail if the memory location is not marked
2282 ; as exclusive anymore. If it succeeds, it will remove the
2283 ; exclusive mark on the memory location. This way, if another
2284 ; thread executes this instruction before us, we will fail and try
2285 ; all over again.
2286 STXR w2, x1, [x0]
2287 CBNZ w2, again
2288
2289 */
2290
2291 p += emit_mov_addr (p, x0, lockaddr);
2292 p += emit_mov (p, x1, register_operand (sp));
2293
2294 p += emit_sevl (p);
2295 p += emit_wfe (p);
2296 p += emit_ldaxr (p, x2, x0);
2297 p += emit_cb (p, 1, w2, -2 * 4);
2298 p += emit_stxr (p, w2, x1, x0);
2299 p += emit_cb (p, 1, x2, -4 * 4);
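  /* A note on the two CBNZ offsets above: the first (-8) jumps from the
     CBNZ after LDAXR back to the WFE; the second (-16) jumps from the
     CBNZ after STXR back to the same WFE, so a lost exclusive
     reservation retries the whole LDAXR/STXR pair.  */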
2300
2301 /* Call collector (struct tracepoint *, unsigned char *):
2302
2303 MOV x0, #(tpoint)
2304 ...
2305
2306 ; Saved registers start after the collecting_t object.
2307 ADD x1, sp, #16
2308
2309 ; We use an intra-procedure-call scratch register.
2310 MOV ip0, #(collector)
2311 ...
2312
2313 ; And call back to C!
2314 BLR ip0
2315
2316 */
2317
2318 p += emit_mov_addr (p, x0, tpoint);
2319 p += emit_add (p, x1, sp, immediate_operand (16));
2320
2321 p += emit_mov_addr (p, ip0, collector);
2322 p += emit_blr (p, ip0);
2323
2324 /* Release the lock.
2325
2326 MOV x0, #(lockaddr)
2327 ...
2328
2329 ; This instruction is a normal store with memory ordering
2330 ; constraints. Thanks to this we do not have to put a data
2331 ; barrier instruction to make sure all data read and writes are done
2332 ; before this instruction is executed. Furthermore, this instruction
2333 ; will trigger an event, letting other threads know they can grab
2334 ; the lock.
2335 STLR xzr, [x0]
2336
2337 */
2338 p += emit_mov_addr (p, x0, lockaddr);
2339 p += emit_stlr (p, xzr, x0);
2340
2341 /* Free collecting_t object:
2342
2343 ADD sp, sp, #16
2344
2345 */
2346 p += emit_add (p, sp, sp, immediate_operand (16));
2347
2348 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2349 registers from the stack.
2350
2351 LDR x2, [sp, #(2 * 16)]
2352 LDR x1, [sp, #(1 * 16)]
2353 LDR x0, [sp, #(0 * 16)]
2354
2355 MSR NZCV, x2
2356 MSR FPSR, x1
2357 MSR FPCR, x0
2358
2359 ADD sp, sp, #(5 * 16)
2360
2361 */
2362 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2363 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2364 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2365 p += emit_msr (p, NZCV, x2);
2366 p += emit_msr (p, FPSR, x1);
2367 p += emit_msr (p, FPCR, x0);
2368
2369 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2370
2371 /* Pop general purpose registers:
2372
2373 LDR x0, [sp]
2374 ...
2375 LDR x30, [sp, #(30 * 16)]
2376
2377 ADD sp, sp, #(31 * 16)
2378
2379 */
2380 for (i = 0; i <= 30; i += 1)
2381 p += emit_ldr (p, aarch64_register (i, 1), sp,
2382 offset_memory_operand (i * 16));
2383 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2384
2385 /* Pop SIMD&FP registers:
2386
2387 LDP q0, q1, [sp]
2388 ...
2389 LDP q30, q31, [sp, #(30 * 16)]
2390
2391 ADD sp, sp, #(32 * 16)
2392
2393 */
2394 for (i = 0; i <= 30; i += 2)
2395 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2396 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2397
2398 /* Write the code into the inferior memory. */
2399 append_insns (&buildaddr, p - buf, buf);
2400
2401 /* Now emit the relocated instruction. */
2402 *adjusted_insn_addr = buildaddr;
2403 target_read_uint32 (tpaddr, &insn);
2404
2405 insn_data.base.insn_addr = tpaddr;
2406 insn_data.new_addr = buildaddr;
2407 insn_data.insn_ptr = buf;
2408
2409 aarch64_relocate_instruction (insn, &visitor,
2410 (struct aarch64_insn_data *) &insn_data);
2411
2412 /* We may not have been able to relocate the instruction. */
2413 if (insn_data.insn_ptr == buf)
2414 {
2415 sprintf (err,
2416 "E.Could not relocate instruction from %s to %s.",
2417 core_addr_to_string_nz (tpaddr),
2418 core_addr_to_string_nz (buildaddr));
2419 return 1;
2420 }
2421 else
2422 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2423 *adjusted_insn_addr_end = buildaddr;
2424
2425 /* Go back to the start of the buffer. */
2426 p = buf;
2427
2428 /* Emit a branch back from the jump pad. */
2429 offset = (tpaddr + orig_size - buildaddr);
2430 if (!can_encode_int32 (offset, 28))
2431 {
2432 sprintf (err,
2433 "E.Jump back from jump pad too far from tracepoint "
2434 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2435 offset);
2436 return 1;
2437 }
2438
2439 p += emit_b (p, 0, offset);
2440 append_insns (&buildaddr, p - buf, buf);
2441
2442 /* Give the caller a branch instruction into the jump pad. */
2443 offset = (*jump_entry - tpaddr);
2444 if (!can_encode_int32 (offset, 28))
2445 {
2446 sprintf (err,
2447 "E.Jump pad too far from tracepoint "
2448 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2449 offset);
2450 return 1;
2451 }
2452
2453 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2454 *jjump_pad_insn_size = 4;
2455
2456 /* Return the end address of our pad. */
2457 *jump_entry = buildaddr;
2458
2459 return 0;
2460}
2461
2462/* Helper function writing LEN instructions from START into
2463 current_insn_ptr. */
2464
2465static void
2466emit_ops_insns (const uint32_t *start, int len)
2467{
2468 CORE_ADDR buildaddr = current_insn_ptr;
2469
2470 if (debug_threads)
2471 debug_printf ("Adding %d instructions at %s\n",
2472 len, paddress (buildaddr));
2473
2474 append_insns (&buildaddr, len, start);
2475 current_insn_ptr = buildaddr;
2476}
2477
2478/* Pop a register from the stack. */
2479
2480static int
2481emit_pop (uint32_t *buf, struct aarch64_register rt)
2482{
2483 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2484}
2485
2486/* Push a register on the stack. */
2487
2488static int
2489emit_push (uint32_t *buf, struct aarch64_register rt)
2490{
2491 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2492}
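/* Both helpers move SP in 16-byte cells even though only 8 bytes are
   stored, keeping SP 16-byte aligned at every point of the compiled
   expression; AArch64 may enforce 16-byte SP alignment on SP-based
   accesses, so smaller cells would not be safe.  */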
2493
2494/* Implementation of emit_ops method "emit_prologue". */
2495
2496static void
2497aarch64_emit_prologue (void)
2498{
2499 uint32_t buf[16];
2500 uint32_t *p = buf;
2501
2502 /* This function emits a prologue for the following function prototype:
2503
2504 enum eval_result_type f (unsigned char *regs,
2505 ULONGEST *value);
2506
2507 The first argument is a buffer of raw registers. The second
2508 argument is the result of
2509 evaluating the expression, which will be set to whatever is on top of
2510 the stack at the end.
2511
2512 The stack set up by the prologue is as such:
2513
2514 High *------------------------------------------------------*
2515 | LR |
2516 | FP | <- FP
2517 | x1 (ULONGEST *value) |
2518 | x0 (unsigned char *regs) |
2519 Low *------------------------------------------------------*
2520
2521 As we are implementing a stack machine, each opcode can expand the
2522 stack so we never know how far we are from the data saved by this
2523 prologue. In order to be able to refer to value and regs later, we save
2524 the current stack pointer in the frame pointer. This way, it is not
2525 clobbered when calling C functions.
2526
2527 Finally, throughout every operation, we are using register x0 as the
2528 top of the stack, and x1 as a scratch register. */
2529
2530 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2531 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2532 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2533
2534 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2535
2536
2537 emit_ops_insns (buf, p - buf);
2538}
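/* With this frame, the compiled code can always find the arguments
   through fp regardless of stack depth: *value lives at fp - 8 and
   regs at fp - 16, which is exactly what aarch64_emit_epilogue and
   aarch64_emit_reg below compute with emit_sub from fp.  */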
2539
2540/* Implementation of emit_ops method "emit_epilogue". */
2541
2542static void
2543aarch64_emit_epilogue (void)
2544{
2545 uint32_t buf[16];
2546 uint32_t *p = buf;
2547
2548 /* Store the result of the expression (x0) in *value. */
2549 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2550 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2551 p += emit_str (p, x0, x1, offset_memory_operand (0));
2552
2553 /* Restore the previous state. */
2554 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2555 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2556
2557 /* Return expr_eval_no_error. */
2558 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2559 p += emit_ret (p, lr);
2560
2561 emit_ops_insns (buf, p - buf);
2562}
2563
2564/* Implementation of emit_ops method "emit_add". */
2565
2566static void
2567aarch64_emit_add (void)
2568{
2569 uint32_t buf[16];
2570 uint32_t *p = buf;
2571
2572 p += emit_pop (p, x1);
2573 p += emit_add (p, x0, x1, register_operand (x0));
2574
2575 emit_ops_insns (buf, p - buf);
2576}
2577
2578/* Implementation of emit_ops method "emit_sub". */
2579
2580static void
2581aarch64_emit_sub (void)
2582{
2583 uint32_t buf[16];
2584 uint32_t *p = buf;
2585
2586 p += emit_pop (p, x1);
2587 p += emit_sub (p, x0, x1, register_operand (x0));
2588
2589 emit_ops_insns (buf, p - buf);
2590}
2591
2592/* Implementation of emit_ops method "emit_mul". */
2593
2594static void
2595aarch64_emit_mul (void)
2596{
2597 uint32_t buf[16];
2598 uint32_t *p = buf;
2599
2600 p += emit_pop (p, x1);
2601 p += emit_mul (p, x0, x1, x0);
2602
2603 emit_ops_insns (buf, p - buf);
2604}
2605
2606/* Implementation of emit_ops method "emit_lsh". */
2607
2608static void
2609aarch64_emit_lsh (void)
2610{
2611 uint32_t buf[16];
2612 uint32_t *p = buf;
2613
2614 p += emit_pop (p, x1);
2615 p += emit_lslv (p, x0, x1, x0);
2616
2617 emit_ops_insns (buf, p - buf);
2618}
2619
2620/* Implementation of emit_ops method "emit_rsh_signed". */
2621
2622static void
2623aarch64_emit_rsh_signed (void)
2624{
2625 uint32_t buf[16];
2626 uint32_t *p = buf;
2627
2628 p += emit_pop (p, x1);
2629 p += emit_asrv (p, x0, x1, x0);
2630
2631 emit_ops_insns (buf, p - buf);
2632}
2633
2634/* Implementation of emit_ops method "emit_rsh_unsigned". */
2635
2636static void
2637aarch64_emit_rsh_unsigned (void)
2638{
2639 uint32_t buf[16];
2640 uint32_t *p = buf;
2641
2642 p += emit_pop (p, x1);
2643 p += emit_lsrv (p, x0, x1, x0);
2644
2645 emit_ops_insns (buf, p - buf);
2646}
2647
2648/* Implementation of emit_ops method "emit_ext". */
2649
2650static void
2651aarch64_emit_ext (int arg)
2652{
2653 uint32_t buf[16];
2654 uint32_t *p = buf;
2655
2656 p += emit_sbfx (p, x0, x0, 0, arg);
2657
2658 emit_ops_insns (buf, p - buf);
2659}
2660
2661/* Implementation of emit_ops method "emit_log_not". */
2662
2663static void
2664aarch64_emit_log_not (void)
2665{
2666 uint32_t buf[16];
2667 uint32_t *p = buf;
2668
2669 /* If the top of the stack is 0, replace it with 1. Else replace it with
2670 0. */
2671
2672 p += emit_cmp (p, x0, immediate_operand (0));
2673 p += emit_cset (p, x0, EQ);
2674
2675 emit_ops_insns (buf, p - buf);
2676}
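/* E.g. a top-of-stack value of 7 becomes 0, and 0 becomes 1: CMP sets
   the Z flag only for 0, and CSET materializes that flag into x0.  */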
2677
2678/* Implementation of emit_ops method "emit_bit_and". */
2679
2680static void
2681aarch64_emit_bit_and (void)
2682{
2683 uint32_t buf[16];
2684 uint32_t *p = buf;
2685
2686 p += emit_pop (p, x1);
2687 p += emit_and (p, x0, x0, x1);
2688
2689 emit_ops_insns (buf, p - buf);
2690}
2691
2692/* Implementation of emit_ops method "emit_bit_or". */
2693
2694static void
2695aarch64_emit_bit_or (void)
2696{
2697 uint32_t buf[16];
2698 uint32_t *p = buf;
2699
2700 p += emit_pop (p, x1);
2701 p += emit_orr (p, x0, x0, x1);
2702
2703 emit_ops_insns (buf, p - buf);
2704}
2705
2706/* Implementation of emit_ops method "emit_bit_xor". */
2707
2708static void
2709aarch64_emit_bit_xor (void)
2710{
2711 uint32_t buf[16];
2712 uint32_t *p = buf;
2713
2714 p += emit_pop (p, x1);
2715 p += emit_eor (p, x0, x0, x1);
2716
2717 emit_ops_insns (buf, p - buf);
2718}
2719
2720/* Implementation of emit_ops method "emit_bit_not". */
2721
2722static void
2723aarch64_emit_bit_not (void)
2724{
2725 uint32_t buf[16];
2726 uint32_t *p = buf;
2727
2728 p += emit_mvn (p, x0, x0);
2729
2730 emit_ops_insns (buf, p - buf);
2731}
2732
2733/* Implementation of emit_ops method "emit_equal". */
2734
2735static void
2736aarch64_emit_equal (void)
2737{
2738 uint32_t buf[16];
2739 uint32_t *p = buf;
2740
2741 p += emit_pop (p, x1);
2742 p += emit_cmp (p, x0, register_operand (x1));
2743 p += emit_cset (p, x0, EQ);
2744
2745 emit_ops_insns (buf, p - buf);
2746}
2747
2748/* Implementation of emit_ops method "emit_less_signed". */
2749
2750static void
2751aarch64_emit_less_signed (void)
2752{
2753 uint32_t buf[16];
2754 uint32_t *p = buf;
2755
2756 p += emit_pop (p, x1);
2757 p += emit_cmp (p, x1, register_operand (x0));
2758 p += emit_cset (p, x0, LT);
2759
2760 emit_ops_insns (buf, p - buf);
2761}
2762
2763/* Implementation of emit_ops method "emit_less_unsigned". */
2764
2765static void
2766aarch64_emit_less_unsigned (void)
2767{
2768 uint32_t buf[16];
2769 uint32_t *p = buf;
2770
2771 p += emit_pop (p, x1);
2772 p += emit_cmp (p, x1, register_operand (x0));
2773 p += emit_cset (p, x0, LO);
2774
2775 emit_ops_insns (buf, p - buf);
2776}
2777
2778/* Implementation of emit_ops method "emit_ref". */
2779
2780static void
2781aarch64_emit_ref (int size)
2782{
2783 uint32_t buf[16];
2784 uint32_t *p = buf;
2785
2786 switch (size)
2787 {
2788 case 1:
2789 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2790 break;
2791 case 2:
2792 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2793 break;
2794 case 4:
2795 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2796 break;
2797 case 8:
2798 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2799 break;
2800 default:
2801 /* Unknown size, bail on compilation. */
2802 emit_error = 1;
2803 break;
2804 }
2805
2806 emit_ops_insns (buf, p - buf);
2807}
2808
2809/* Implementation of emit_ops method "emit_if_goto". */
2810
2811static void
2812aarch64_emit_if_goto (int *offset_p, int *size_p)
2813{
2814 uint32_t buf[16];
2815 uint32_t *p = buf;
2816
2817 /* The Z flag is set or cleared here. */
2818 p += emit_cmp (p, x0, immediate_operand (0));
2819 /* This instruction must not change the Z flag. */
2820 p += emit_pop (p, x0);
2821 /* Branch over the next instruction if x0 == 0. */
2822 p += emit_bcond (p, EQ, 8);
2823
2824 /* The NOP instruction will be patched with an unconditional branch. */
2825 if (offset_p)
2826 *offset_p = (p - buf) * 4;
2827 if (size_p)
2828 *size_p = 4;
2829 p += emit_nop (p);
2830
2831 emit_ops_insns (buf, p - buf);
2832}
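/* The offset handed back to the caller marks where, once the goto
   target is known, aarch64_write_goto_address below overwrites this
   NOP with a real unconditional branch.  */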
2833
2834/* Implementation of emit_ops method "emit_goto". */
2835
2836static void
2837aarch64_emit_goto (int *offset_p, int *size_p)
2838{
2839 uint32_t buf[16];
2840 uint32_t *p = buf;
2841
2842 /* The NOP instruction will be patched with an unconditional branch. */
2843 if (offset_p)
2844 *offset_p = 0;
2845 if (size_p)
2846 *size_p = 4;
2847 p += emit_nop (p);
2848
2849 emit_ops_insns (buf, p - buf);
2850}
2851
2852/* Implementation of emit_ops method "write_goto_address". */
2853
2854static void
2855aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2856{
2857 uint32_t insn;
2858
2859 emit_b (&insn, 0, to - from);
2860 append_insns (&from, 1, &insn);
2861}
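/* Note that emit_b encodes a 28-bit (about +/-128 MiB) byte offset,
   the same limit checked with can_encode_int32 (..., 28) in the jump
   pad code above, so FROM and TO are assumed to be close.  */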
2862
2863/* Implementation of emit_ops method "emit_const". */
2864
2865static void
2866aarch64_emit_const (LONGEST num)
2867{
2868 uint32_t buf[16];
2869 uint32_t *p = buf;
2870
2871 p += emit_mov_addr (p, x0, num);
2872
2873 emit_ops_insns (buf, p - buf);
2874}
2875
2876/* Implementation of emit_ops method "emit_call". */
2877
2878static void
2879aarch64_emit_call (CORE_ADDR fn)
2880{
2881 uint32_t buf[16];
2882 uint32_t *p = buf;
2883
2884 p += emit_mov_addr (p, ip0, fn);
2885 p += emit_blr (p, ip0);
2886
2887 emit_ops_insns (buf, p - buf);
2888}
2889
2890/* Implementation of emit_ops method "emit_reg". */
2891
2892static void
2893aarch64_emit_reg (int reg)
2894{
2895 uint32_t buf[16];
2896 uint32_t *p = buf;
2897
2898 /* Set x0 to unsigned char *regs. */
2899 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2900 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2901 p += emit_mov (p, x1, immediate_operand (reg));
2902
2903 emit_ops_insns (buf, p - buf);
2904
2905 aarch64_emit_call (get_raw_reg_func_addr ());
2906}
2907
2908/* Implementation of emit_ops method "emit_pop". */
2909
2910static void
2911aarch64_emit_pop (void)
2912{
2913 uint32_t buf[16];
2914 uint32_t *p = buf;
2915
2916 p += emit_pop (p, x0);
2917
2918 emit_ops_insns (buf, p - buf);
2919}
2920
2921/* Implementation of emit_ops method "emit_stack_flush". */
2922
2923static void
2924aarch64_emit_stack_flush (void)
2925{
2926 uint32_t buf[16];
2927 uint32_t *p = buf;
2928
2929 p += emit_push (p, x0);
2930
2931 emit_ops_insns (buf, p - buf);
2932}
2933
2934/* Implementation of emit_ops method "emit_zero_ext". */
2935
2936static void
2937aarch64_emit_zero_ext (int arg)
2938{
2939 uint32_t buf[16];
2940 uint32_t *p = buf;
2941
2942 p += emit_ubfx (p, x0, x0, 0, arg);
2943
2944 emit_ops_insns (buf, p - buf);
2945}
2946
2947/* Implementation of emit_ops method "emit_swap". */
2948
2949static void
2950aarch64_emit_swap (void)
2951{
2952 uint32_t buf[16];
2953 uint32_t *p = buf;
2954
2955 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2956 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2957 p += emit_mov (p, x0, register_operand (x1));
2958
2959 emit_ops_insns (buf, p - buf);
2960}
2961
2962/* Implementation of emit_ops method "emit_stack_adjust". */
2963
2964static void
2965aarch64_emit_stack_adjust (int n)
2966{
2967 /* This is not needed with our design. */
2968 uint32_t buf[16];
2969 uint32_t *p = buf;
2970
2971 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2972
2973 emit_ops_insns (buf, p - buf);
2974}
2975
2976/* Implementation of emit_ops method "emit_int_call_1". */
2977
2978static void
2979aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2980{
2981 uint32_t buf[16];
2982 uint32_t *p = buf;
2983
2984 p += emit_mov (p, x0, immediate_operand (arg1));
2985
2986 emit_ops_insns (buf, p - buf);
2987
2988 aarch64_emit_call (fn);
2989}
2990
2991/* Implementation of emit_ops method "emit_void_call_2". */
2992
2993static void
2994aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2995{
2996 uint32_t buf[16];
2997 uint32_t *p = buf;
2998
2999 /* Push x0 on the stack. */
3000 aarch64_emit_stack_flush ();
3001
3002 /* Setup arguments for the function call:
3003
3004 x0: arg1
3005 x1: top of the stack
3006
3007 MOV x1, x0
3008 MOV x0, #arg1 */
3009
3010 p += emit_mov (p, x1, register_operand (x0));
3011 p += emit_mov (p, x0, immediate_operand (arg1));
3012
3013 emit_ops_insns (buf, p - buf);
3014
3015 aarch64_emit_call (fn);
3016
3017 /* Restore x0. */
3018 aarch64_emit_pop ();
3019}
3020
3021/* Implementation of emit_ops method "emit_eq_goto". */
3022
3023static void
3024aarch64_emit_eq_goto (int *offset_p, int *size_p)
3025{
3026 uint32_t buf[16];
3027 uint32_t *p = buf;
3028
3029 p += emit_pop (p, x1);
3030 p += emit_cmp (p, x1, register_operand (x0));
3031 /* Branch over the next instruction if x0 != x1. */
3032 p += emit_bcond (p, NE, 8);
3033 /* The NOP instruction will be patched with an unconditional branch. */
3034 if (offset_p)
3035 *offset_p = (p - buf) * 4;
3036 if (size_p)
3037 *size_p = 4;
3038 p += emit_nop (p);
3039
3040 emit_ops_insns (buf, p - buf);
3041}
3042
3043/* Implementation of emit_ops method "emit_ne_goto". */
3044
3045static void
3046aarch64_emit_ne_goto (int *offset_p, int *size_p)
3047{
3048 uint32_t buf[16];
3049 uint32_t *p = buf;
3050
3051 p += emit_pop (p, x1);
3052 p += emit_cmp (p, x1, register_operand (x0));
3053 /* Branch over the next instruction if x0 == x1. */
3054 p += emit_bcond (p, EQ, 8);
3055 /* The NOP instruction will be patched with an unconditional branch. */
3056 if (offset_p)
3057 *offset_p = (p - buf) * 4;
3058 if (size_p)
3059 *size_p = 4;
3060 p += emit_nop (p);
3061
3062 emit_ops_insns (buf, p - buf);
3063}
3064
3065/* Implementation of emit_ops method "emit_lt_goto". */
3066
3067static void
3068aarch64_emit_lt_goto (int *offset_p, int *size_p)
3069{
3070 uint32_t buf[16];
3071 uint32_t *p = buf;
3072
3073 p += emit_pop (p, x1);
3074 p += emit_cmp (p, x1, register_operand (x0));
3075 /* Branch over the next instruction if x1 >= x0. */
3076 p += emit_bcond (p, GE, 8);
3077 /* The NOP instruction will be patched with an unconditional branch. */
3078 if (offset_p)
3079 *offset_p = (p - buf) * 4;
3080 if (size_p)
3081 *size_p = 4;
3082 p += emit_nop (p);
3083
3084 emit_ops_insns (buf, p - buf);
3085}
3086
3087/* Implementation of emit_ops method "emit_le_goto". */
3088
3089static void
3090aarch64_emit_le_goto (int *offset_p, int *size_p)
3091{
3092 uint32_t buf[16];
3093 uint32_t *p = buf;
3094
3095 p += emit_pop (p, x1);
3096 p += emit_cmp (p, x1, register_operand (x0));
3097 /* Branch over the next instruction if x1 > x0. */
3098 p += emit_bcond (p, GT, 8);
3099 /* The NOP instruction will be patched with an unconditional branch. */
3100 if (offset_p)
3101 *offset_p = (p - buf) * 4;
3102 if (size_p)
3103 *size_p = 4;
3104 p += emit_nop (p);
3105
3106 emit_ops_insns (buf, p - buf);
3107}
3108
3109/* Implementation of emit_ops method "emit_gt_goto". */
3110
3111static void
3112aarch64_emit_gt_goto (int *offset_p, int *size_p)
3113{
3114 uint32_t buf[16];
3115 uint32_t *p = buf;
3116
3117 p += emit_pop (p, x1);
3118 p += emit_cmp (p, x1, register_operand (x0));
3119 /* Branch over the next instruction if x1 <= x0. */
3120 p += emit_bcond (p, LE, 8);
3121 /* The NOP instruction will be patched with an unconditional branch. */
3122 if (offset_p)
3123 *offset_p = (p - buf) * 4;
3124 if (size_p)
3125 *size_p = 4;
3126 p += emit_nop (p);
3127
3128 emit_ops_insns (buf, p - buf);
3129}
3130
3131/* Implementation of emit_ops method "emit_ge_got". */
3132
3133static void
3134aarch64_emit_ge_got (int *offset_p, int *size_p)
3135{
3136 uint32_t buf[16];
3137 uint32_t *p = buf;
3138
3139 p += emit_pop (p, x1);
3140 p += emit_cmp (p, x1, register_operand (x0));
3141 /* Branch over the next instruction if x1 < x0. */
3142 p += emit_bcond (p, LT, 8);
3143 /* The NOP instruction will be patched with an unconditional branch. */
3144 if (offset_p)
3145 *offset_p = (p - buf) * 4;
3146 if (size_p)
3147 *size_p = 4;
3148 p += emit_nop (p);
3149
3150 emit_ops_insns (buf, p - buf);
3151}
3152
3153static struct emit_ops aarch64_emit_ops_impl =
3154{
3155 aarch64_emit_prologue,
3156 aarch64_emit_epilogue,
3157 aarch64_emit_add,
3158 aarch64_emit_sub,
3159 aarch64_emit_mul,
3160 aarch64_emit_lsh,
3161 aarch64_emit_rsh_signed,
3162 aarch64_emit_rsh_unsigned,
3163 aarch64_emit_ext,
3164 aarch64_emit_log_not,
3165 aarch64_emit_bit_and,
3166 aarch64_emit_bit_or,
3167 aarch64_emit_bit_xor,
3168 aarch64_emit_bit_not,
3169 aarch64_emit_equal,
3170 aarch64_emit_less_signed,
3171 aarch64_emit_less_unsigned,
3172 aarch64_emit_ref,
3173 aarch64_emit_if_goto,
3174 aarch64_emit_goto,
3175 aarch64_write_goto_address,
3176 aarch64_emit_const,
3177 aarch64_emit_call,
3178 aarch64_emit_reg,
3179 aarch64_emit_pop,
3180 aarch64_emit_stack_flush,
3181 aarch64_emit_zero_ext,
3182 aarch64_emit_swap,
3183 aarch64_emit_stack_adjust,
3184 aarch64_emit_int_call_1,
3185 aarch64_emit_void_call_2,
3186 aarch64_emit_eq_goto,
3187 aarch64_emit_ne_goto,
3188 aarch64_emit_lt_goto,
3189 aarch64_emit_le_goto,
3190 aarch64_emit_gt_goto,
3191 aarch64_emit_ge_got,
3192};
3193
3194/* Implementation of target ops method "emit_ops". */
3195
3196emit_ops *
3197aarch64_target::emit_ops ()
3198{
3199 return &aarch64_emit_ops_impl;
3200}
3201
3202/* Implementation of target ops method
3203 "get_min_fast_tracepoint_insn_len". */
3204
3205int
3206aarch64_target::get_min_fast_tracepoint_insn_len ()
3207{
3208 return 4;
3209}
3210
3211/* Implementation of linux target ops method "low_supports_range_stepping". */
3212
3213bool
3214aarch64_target::low_supports_range_stepping ()
3215{
3216 return true;
3217}
3218
3219/* Implementation of target ops method "sw_breakpoint_from_kind". */
3220
3221const gdb_byte *
3222aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3223{
3224 if (is_64bit_tdesc ())
3225 {
3226 *size = aarch64_breakpoint_len;
3227 return aarch64_breakpoint;
3228 }
3229 else
3230 return arm_sw_breakpoint_from_kind (kind, size);
3231}
3232
3233/* Implementation of target ops method "breakpoint_kind_from_pc". */
3234
3235int
3236aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3237{
3238 if (is_64bit_tdesc ())
3239 return aarch64_breakpoint_len;
3240 else
3241 return arm_breakpoint_kind_from_pc (pcptr);
3242}
3243
3244/* Implementation of the target ops method
3245 "breakpoint_kind_from_current_state". */
3246
3247int
3248aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3249{
3250 if (is_64bit_tdesc ())
3251 return aarch64_breakpoint_len;
3252 else
3253 return arm_breakpoint_kind_from_current_state (pcptr);
3254}
3255
3256/* Returns true if memory tagging is supported. */
3257bool
3258aarch64_target::supports_memory_tagging ()
3259{
3260 if (current_thread == NULL)
3261 {
3262 /* We don't have any processes running, so don't attempt to
3263 use linux_get_hwcap2 as it will try to fetch the current
3264 thread id. Instead, just fetch the auxv from the self
3265 PID. */
3266#ifdef HAVE_GETAUXVAL
3267 return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
3268#else
3269 return true;
3270#endif
3271 }
3272
3273 return (linux_get_hwcap2 (8) & HWCAP2_MTE) != 0;
3274}
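/* Both paths test the same HWCAP2_MTE bit; the getauxval fallback
   reads gdbserver's own auxv, which is assumed to be representative
   because gdbserver and its inferiors run under the same kernel.  */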
3275
3276bool
3277aarch64_target::fetch_memtags (CORE_ADDR address, size_t len,
3278 gdb::byte_vector &tags, int type)
3279{
3280 /* Allocation tags are per-process, so any tid is fine. */
3281 int tid = lwpid_of (current_thread);
3282
3283 /* Allocation tag? */
3284 if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
3285 return aarch64_mte_fetch_memtags (tid, address, len, tags);
3286
3287 return false;
3288}
3289
3290bool
3291aarch64_target::store_memtags (CORE_ADDR address, size_t len,
3292 const gdb::byte_vector &tags, int type)
3293{
3294 /* Allocation tags are per-process, so any tid is fine. */
3295 int tid = lwpid_of (current_thread);
3296
3297 /* Allocation tag? */
3298 if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
3299 return aarch64_mte_store_memtags (tid, address, len, tags);
3300
3301 return false;
3302}
3303
3304/* The linux target ops object. */
3305
3306linux_process_target *the_linux_target = &the_aarch64_target;
3307
3308void
3309initialize_low_arch (void)
3310{
3311 initialize_low_arch_aarch32 ();
3312
3313 initialize_regsets_info (&aarch64_regsets_info);
3314}