gdb: add target_ops::supports_displaced_step
gdbserver/linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                          int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
                          "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
                          "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for this to be per-process rather than per-thread is
     the lack of information in the gdbserver environment: gdbserver is
     not told whether a requested hardware breakpoint/watchpoint is
     thread specific or not, so it has to set each hw bp/wp for every
     thread in the current process.  The higher level bp/wp management
     in gdb will resume a thread if a hw bp/wp trap is not expected for
     it.  Since the hw bp/wp setting is the same for each thread, it is
     reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the current thread's target description contains the
   SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
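
/* Illustrative note (not in the original source): read little-endian,
   these four bytes form the 32-bit word 0xd4200000.  The BRK encoding
   is 0xd4200000 | (imm16 << 5), so this is BRK #0.  */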

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
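
/* Reference note (illustrative, not in the original source): these
   Z_PACKET_* values correspond to the remote protocol packets Z0
   (software breakpoint), Z1 (hardware breakpoint), Z2 (write
   watchpoint), Z3 (read watchpoint) and Z4 (access watchpoint);
   e.g. a client inserts a 4-byte write watchpoint at ADDR with
   "Z2,ADDR,4".  */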

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                     0 /* is_insert */, state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: | 4 | 5 | 6 | 7 | 8 |
                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
                              process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent on
     fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
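
/* Illustrative note (not in the original source): the NT_ARM_TLS
   regset exposes TPIDR_EL0, the EL0 thread pointer, so the value read
   above is the thread's TLS base address.  */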

bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
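
/* Illustrative note (not in the original source): the Linux syscall
   calling convention keeps the syscall number in X8 on AArch64 and in
   R7 on 32-bit ARM EABI, which is why those registers are collected
   above.  */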

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
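
/* Summary (illustrative, not in the original source): with base
   register xn and index 16, the three helpers above correspond to

     offset:      LDR x0, [xn, #16]    ; xn unchanged
     pre-index:   LDR x0, [xn, #16]!   ; xn += 16 before the access
     post-index:  LDR x0, [xn], #16    ; xn += 16 after the access  */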

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
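
/* Worked example (illustrative, not in the original source):
   emit_stp (p, x0, x1, sp, preindex_memory_operand (-16)) stores
   -16 >> 3 == -2 in the 7-bit scaled immediate field, producing
   STP x0, x1, [sp, #-16]! -- the usual "push a register pair"
   prologue idiom.  */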

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}


/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
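
/* Worked example (illustrative, not in the original source): for
   ADDR == 0x0000ffff12345678 this emits

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16
     MOVK xd, #0xffff, lsl #32

   and returns 3; the final MOVK is skipped because bits 48..63 of
   ADDR are zero.  */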

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
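
/* Usage example (illustrative, not in the original source):
   emit_mrs (p, x0, TPIDR_EL0) emits MRS x0, TPIDR_EL0, reading the
   software thread ID register into x0.  */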

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
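
/* Worked example (illustrative, not in the original source):
   emit_ubfx (p, x0, x1, 8, 4) is UBFM x0, x1, #8, #11 and extracts
   bits 8..11 of x1 into the low four bits of x0, zeroing the rest.  */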

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction places RN in RD if the condition is true, and
   RM + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register: 1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
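
/* Worked example (illustrative, not in the original source):
   emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE (EQ ^ 0x1 == NE),
   i.e. x0 = 1 if the Z flag is set and 0 otherwise, matching the
   CSET x0, EQ alias.  */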

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
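
/* Typical usage (illustrative, not in the original source; ADDR and TO
   are assumed local variables): callers build a small buffer of
   instructions and flush it to the inferior in one go, e.g.

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);
     p += emit_blr (p, x0);
     append_insns (&to, p - buf, buf);

   TO is advanced past the bytes written, so consecutive calls
   append.  */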

/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
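
/* Worked example (illustrative, not in the original source): if a B
   instruction at 0x400000 with offset +0x100 is relocated to a jump
   pad at 0x500000, NEW_OFFSET becomes 0x400000 - 0x500000 + 0x100
   == -0xfff00, so the relocated branch still lands on the original
   target, 0x400100.  */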

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
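
/* Because the address is computed relative to the original location and
   then materialized as an absolute MOV/MOVK sequence, the relocated
   copy produces the same value wherever it ends up, so no range check
   is needed here.  */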

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
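
/* aarch64_relocate_instruction (called below from
   install_fast_tracepoint_jump_pad) decodes one instruction and
   dispatches to exactly one of the methods above, so a PC-relative
   instruction displaced by a fast tracepoint is rewritten to work
   from the jump pad.  */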

bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}

/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128-bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *----- General purpose registers from 30 down to 0. ---*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *-------------- Special purpose registers. ------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *-------------- collecting_t object -------------------*
          | TPIDR_EL0                    | struct tracepoint *   |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, x2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, w2, -4 * 4);
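
  /* As a rough illustration only (pseudo-C; try_lock and wait_for_event
     are hypothetical helpers, not real functions), the sequence above
     behaves like:

       while (!try_lock (lockaddr, collecting))
	 wait_for_event ();

     i.e. a spin lock built from the load-exclusive/store-exclusive
     pair, with acquire/release ordering taking the place of explicit
     barriers.  */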

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
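
/* Note that each push/pop moves SP by a full 16-byte cell even though
   only 8 bytes are stored, keeping SP 16-byte aligned as AArch64
   requires whenever SP is used as a base register.  */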

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1 (ULONGEST *value)                                 |
          | x0 (unsigned char *regs)                             |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
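
/* For example, aarch64_emit_ext (8) emits SBFX x0, x0, #0, #8, which
   sign-extends the low 8 bits of the top-of-stack value held in x0.  */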

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
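
/* The bytecode compiler records the offset and size returned through
   OFFSET_P and SIZE_P and, once the branch target is known, patches
   the NOP with a real branch via aarch64_write_goto_address below.  */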

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}
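
/* Every AArch64 instruction is 4 bytes wide, so a fast tracepoint can
   always be placed by overwriting exactly one instruction with the
   branch into the jump pad.  */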

/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}