gdbserver/linux-low: turn 'supports_hardware_single_step' into a method
gdbserver/linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description contains the SVE feature,
   i.e. the regcache holds the SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

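/* The regset accessors below follow the usual gdbserver pairing: the
   fill_* functions copy register values from the regcache into the
   ptrace buffer (used when writing registers back to the inferior),
   while the store_* functions supply values from the ptrace buffer
   into the regcache (used when reading them).  */
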
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
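
/* These bytes are the little-endian encoding of BRK #0 (0xd4200000);
   as noted at append_insns below, instructions are always little
   endian on AArch64, regardless of the data endianness.  */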

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
			   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

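/* NT_ARM_TLS reads tpidr_el0, the software thread ID register (see
   the system control register list further below); the fast
   tracepoint machinery uses this per-thread value to tell which
   thread is currently collecting.  */
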
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

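/* A worked example of the ENCODE macro used throughout the emit_*
   helpers (see arch/aarch64-insn.h): ENCODE (val, size, offset)
   places the low SIZE bits of VAL at bit position OFFSET.  For
   instance, emit_blr for x16 yields BLR | ENCODE (16, 5, 5), i.e.
   0xd63f0000 | (16 << 5) == 0xd63f0200, the encoding of "blr x16".  */
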
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

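/* For instance, the classic frame push used when building the jump
   pad,

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-2 * 8));

   assembles to "stp x29, x30, [sp, #-16]!".  */
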
/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}


/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

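/* For example, for ADDR 0x0000ffffb7f01234 the emitted sequence is

     mov  xd, #0x1234
     movk xd, #0xb7f0, lsl #16
     movk xd, #0xffff, lsl #32

   and the final MOVK is skipped because bits 48..63 are all zero.  */
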
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write an LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write an LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write an SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write an SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  RN is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination register;
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

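/* For example, emit_cset (buf, x0, EQ) emits CSINC x0, xzr, xzr, NE
   (EQ is 0x0, NE is 0x1, so EQ ^ 0x1 == NE): x0 becomes 1 when the
   condition EQ holds and 0 otherwise.  */
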
bb903df0
PL
1728/* Write LEN instructions from BUF into the inferior memory at *TO.
1729
1730 Note instructions are always little endian on AArch64, unlike data. */
1731
1732static void
1733append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1734{
1735 size_t byte_len = len * sizeof (uint32_t);
1736#if (__BYTE_ORDER == __BIG_ENDIAN)
cb93dc7f 1737 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
bb903df0
PL
1738 size_t i;
1739
1740 for (i = 0; i < len; i++)
1741 le_buf[i] = htole32 (buf[i]);
1742
4196ab2a 1743 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
bb903df0
PL
1744
1745 xfree (le_buf);
1746#else
4196ab2a 1747 target_write_memory (*to, (const unsigned char *) buf, byte_len);
bb903df0
PL
1748#endif
1749
1750 *to += byte_len;
1751}
1752
0badd99f
YQ
1753/* Sub-class of struct aarch64_insn_data, store information of
1754 instruction relocation for fast tracepoint. Visitor can
1755 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1756 the relocated instructions in buffer pointed by INSN_PTR. */
bb903df0 1757
0badd99f
YQ
1758struct aarch64_insn_relocation_data
1759{
1760 struct aarch64_insn_data base;
1761
1762 /* The new address the instruction is relocated to. */
1763 CORE_ADDR new_addr;
1764 /* Pointer to the buffer of relocated instruction(s). */
1765 uint32_t *insn_ptr;
1766};
1767
1768/* Implementation of aarch64_insn_visitor method "b". */
1769
1770static void
1771aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1772 struct aarch64_insn_data *data)
1773{
1774 struct aarch64_insn_relocation_data *insn_reloc
1775 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1776 int64_t new_offset
0badd99f
YQ
1777 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1778
1779 if (can_encode_int32 (new_offset, 28))
1780 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1781}
1782
1783/* Implementation of aarch64_insn_visitor method "b_cond". */
1784
1785static void
1786aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1787 struct aarch64_insn_data *data)
1788{
1789 struct aarch64_insn_relocation_data *insn_reloc
1790 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1791 int64_t new_offset
0badd99f
YQ
1792 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1793
1794 if (can_encode_int32 (new_offset, 21))
1795 {
1796 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1797 new_offset);
bb903df0 1798 }
0badd99f 1799 else if (can_encode_int32 (new_offset, 28))
bb903df0 1800 {
0badd99f
YQ
1801 /* The offset is out of range for a conditional branch
1802 instruction but not for a unconditional branch. We can use
1803 the following instructions instead:
bb903df0 1804
0badd99f
YQ
1805 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1806 B NOT_TAKEN ; Else jump over TAKEN and continue.
1807 TAKEN:
1808 B #(offset - 8)
1809 NOT_TAKEN:
1810
1811 */
1812
1813 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1814 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1815 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1816 }
1817}
1818
1819/* Implementation of aarch64_insn_visitor method "cb". */
1820
1821static void
1822aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1823 const unsigned rn, int is64,
1824 struct aarch64_insn_data *data)
1825{
1826 struct aarch64_insn_relocation_data *insn_reloc
1827 = (struct aarch64_insn_relocation_data *) data;
1828 int64_t new_offset
1829 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1830
1831 if (can_encode_int32 (new_offset, 21))
1832 {
1833 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1834 aarch64_register (rn, is64), new_offset);
1835 }
1836 else if (can_encode_int32 (new_offset, 28))
1837 {
1838 /* The offset is out of range for a compare and branch
1839 instruction but not for an unconditional branch. We can use
1840 the following instructions instead:
1841
1842 CBZ xn, TAKEN ; If xn == 0, then jump to TAKEN.
1843 B NOT_TAKEN ; Else jump over TAKEN and continue.
1844 TAKEN:
1845 B #(offset - 8)
1846 NOT_TAKEN:
1847
1848 */
1849 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1850 aarch64_register (rn, is64), 8);
1851 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1852 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1853 }
1854}
1855
1856/* Implementation of aarch64_insn_visitor method "tb". */
1857
1858static void
1859aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1860 const unsigned rt, unsigned bit,
1861 struct aarch64_insn_data *data)
1862{
1863 struct aarch64_insn_relocation_data *insn_reloc
1864 = (struct aarch64_insn_relocation_data *) data;
1865 int64_t new_offset
1866 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1867
1868 if (can_encode_int32 (new_offset, 16))
1869 {
1870 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1871 aarch64_register (rt, 1), new_offset);
1872 }
1873 else if (can_encode_int32 (new_offset, 28))
1874 {
1875 /* The offset is out of range for a test bit and branch
1876 instruction but not for an unconditional branch. We can use
1877 the following instructions instead:
1878
1879 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
1880 B NOT_TAKEN ; Else jump over TAKEN and continue.
1881 TAKEN:
1882 B #(offset - 8)
1883 NOT_TAKEN:
1884
1885 */
1886 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1887 aarch64_register (rt, 1), 8);
1888 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1889 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1890 new_offset - 8);
1891 }
1892}
1893
1894/* Implementation of aarch64_insn_visitor method "adr". */
1895
1896static void
1897aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1898 const int is_adrp,
1899 struct aarch64_insn_data *data)
1900{
1901 struct aarch64_insn_relocation_data *insn_reloc
1902 = (struct aarch64_insn_relocation_data *) data;
1903 /* We know exactly the address the ADR{P,} instruction will compute.
1904 We can just write it to the destination register. */
1905 CORE_ADDR address = data->insn_addr + offset;
1906
1907 if (is_adrp)
1908 {
1909 /* Clear the lower 12 bits of the address to get its 4K page. */
1910 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1911 aarch64_register (rd, 1),
1912 address & ~0xfff);
1913 }
1914 else
1915 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1916 aarch64_register (rd, 1), address);
1917}
1918
1919/* Implementation of aarch64_insn_visitor method "ldr_literal". */
1920
1921static void
1922aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1923 const unsigned rt, const int is64,
1924 struct aarch64_insn_data *data)
1925{
1926 struct aarch64_insn_relocation_data *insn_reloc
1927 = (struct aarch64_insn_relocation_data *) data;
1928 CORE_ADDR address = data->insn_addr + offset;
1929
1930 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1931 aarch64_register (rt, 1), address);
1932
1933 /* We know exactly what address to load from, and what register we
1934 can use:
1935
1936 MOV xd, #(oldloc + offset)
1937 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1938 ...
1939
1940 LDR xd, [xd] ; or LDRSW xd, [xd]
1941
1942 */
1943
1944 if (is_sw)
1945 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1946 aarch64_register (rt, 1),
1947 aarch64_register (rt, 1),
1948 offset_memory_operand (0));
1949 else
1950 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1951 aarch64_register (rt, is64),
1952 aarch64_register (rt, 1),
1953 offset_memory_operand (0));
1954}
1955
1956/* Implementation of aarch64_insn_visitor method "others". */
1957
1958static void
1959aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1960 struct aarch64_insn_data *data)
1961{
1962 struct aarch64_insn_relocation_data *insn_reloc
1963 = (struct aarch64_insn_relocation_data *) data;
1964
1965 /* The instruction is not PC relative. Just re-emit it at the new
1966 location. */
1967 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1968}
1969
1970static const struct aarch64_insn_visitor visitor =
1971{
1972 aarch64_ftrace_insn_reloc_b,
1973 aarch64_ftrace_insn_reloc_b_cond,
1974 aarch64_ftrace_insn_reloc_cb,
1975 aarch64_ftrace_insn_reloc_tb,
1976 aarch64_ftrace_insn_reloc_adr,
1977 aarch64_ftrace_insn_reloc_ldr_literal,
1978 aarch64_ftrace_insn_reloc_others,
1979};
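/* Editor's note: a sketch of how this table is consumed, mirroring the
   call made in install_fast_tracepoint_jump_pad below:

     struct aarch64_insn_relocation_data insn_data;
     insn_data.base.insn_addr = tpaddr;    // original instruction address
     insn_data.new_addr = buildaddr;       // where it is being moved to
     insn_data.insn_ptr = buf;             // output buffer
     aarch64_relocate_instruction (insn, &visitor,
                                   (struct aarch64_insn_data *) &insn_data);

   aarch64_relocate_instruction decodes INSN and invokes exactly one of
   the callbacks above.  */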
1980
1981bool
1982aarch64_target::supports_fast_tracepoints ()
1983{
1984 return true;
1985}
1986
1987/* Implementation of target ops method
1988 "install_fast_tracepoint_jump_pad". */
1989
1990int
1991aarch64_target::install_fast_tracepoint_jump_pad
1992 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
1993 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
1994 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
1995 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
1996 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
1997 char *err)
1998{
1999 uint32_t buf[256];
2000 uint32_t *p = buf;
2001 int64_t offset;
2002 int i;
2003 uint32_t insn;
2004 CORE_ADDR buildaddr = *jump_entry;
2005 struct aarch64_insn_relocation_data insn_data;
2006
2007 /* We need to save the current state on the stack both to restore it
2008 later and to collect register values when the tracepoint is hit.
2009
2010 The saved registers are pushed in a layout that needs to be in sync
2011 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2012 the supply_fast_tracepoint_registers function will fill in the
2013 register cache from a pointer to the saved registers on the stack we build
2014 here.
2015
2016 For simplicity, we set the size of each cell on the stack to 16 bytes.
2017 This way one cell can hold any register type, from system registers
2018 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
2019 has to be 16-byte aligned anyway.
2020
2021 Note that the CPSR register does not exist on AArch64. Instead we
2022 can access system bits describing the process state with the
2023 MRS/MSR instructions, namely the condition flags. We save them as
2024 if they are part of a CPSR register because that's how GDB
2025 interprets these system bits. At the moment, only the condition
2026 flags are saved in CPSR (NZCV).
2027
2028 Stack layout, each cell is 16 bytes (descending):
2029
2030 High *-------- SIMD&FP registers from 31 down to 0. --------*
2031 | q31 |
2032 . .
2033 . . 32 cells
2034 . .
2035 | q0 |
2036 *---- General purpose registers from 30 down to 0. ----*
2037 | x30 |
2038 . .
2039 . . 31 cells
2040 . .
2041 | x0 |
2042 *------------- Special purpose registers. -------------*
2043 | SP |
2044 | PC |
2045 | CPSR (NZCV) | 5 cells
2046 | FPSR |
2047 | FPCR | <- SP + 16
2048 *------------- collecting_t object --------------------*
2049 | TPIDR_EL0 | struct tracepoint * |
2050 Low *------------------------------------------------------*
2051
2052 After this stack is set up, we issue a call to the collector, passing
2053 it the saved registers at (SP + 16). */
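/* Editor's note (added arithmetic): the frame built below occupies
   (32 + 31 + 5) * 16 = 1088 bytes, plus a final 16-byte push for the
   collecting_t object, 1104 bytes in total.  This is why the original
   SP is recovered as SP + (32 + 31 + 5) * 16 before collecting_t is
   pushed.  */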
2054
2055 /* Push SIMD&FP registers on the stack:
2056
2057 SUB sp, sp, #(32 * 16)
2058
2059 STP q30, q31, [sp, #(30 * 16)]
2060 ...
2061 STP q0, q1, [sp]
2062
2063 */
2064 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2065 for (i = 30; i >= 0; i -= 2)
2066 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2067
2068 /* Push general purpose registers on the stack. Note that we do not need
2069 to push x31 as it represents the xzr register and not the stack
2070 pointer in a STR instruction.
2071
2072 SUB sp, sp, #(31 * 16)
2073
2074 STR x30, [sp, #(30 * 16)]
2075 ...
2076 STR x0, [sp]
2077
2078 */
2079 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2080 for (i = 30; i >= 0; i -= 1)
2081 p += emit_str (p, aarch64_register (i, 1), sp,
2082 offset_memory_operand (i * 16));
2083
2084 /* Make space for 5 more cells.
2085
2086 SUB sp, sp, #(5 * 16)
2087
2088 */
2089 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2090
2091
2092 /* Save SP:
2093
2094 ADD x4, sp, #((32 + 31 + 5) * 16)
2095 STR x4, [sp, #(4 * 16)]
2096
2097 */
2098 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2099 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2100
2101 /* Save PC (tracepoint address):
2102
2103 MOV x3, #(tpaddr)
2104 ...
2105
2106 STR x3, [sp, #(3 * 16)]
2107
2108 */
2109
2110 p += emit_mov_addr (p, x3, tpaddr);
2111 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2112
2113 /* Save CPSR (NZCV), FPSR and FPCR:
2114
2115 MRS x2, nzcv
2116 MRS x1, fpsr
2117 MRS x0, fpcr
2118
2119 STR x2, [sp, #(2 * 16)]
2120 STR x1, [sp, #(1 * 16)]
2121 STR x0, [sp, #(0 * 16)]
2122
2123 */
2124 p += emit_mrs (p, x2, NZCV);
2125 p += emit_mrs (p, x1, FPSR);
2126 p += emit_mrs (p, x0, FPCR);
2127 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2128 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2129 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2130
2131 /* Push the collecting_t object. It consists of the address of the
2132 tracepoint and an ID for the current thread. We get the latter by
2133 reading the tpidr_el0 system register. It corresponds to the
2134 NT_ARM_TLS register accessible with ptrace.
2135
2136 MOV x0, #(tpoint)
2137 ...
2138
2139 MRS x1, tpidr_el0
2140
2141 STP x0, x1, [sp, #-16]!
2142
2143 */
2144
2145 p += emit_mov_addr (p, x0, tpoint);
2146 p += emit_mrs (p, x1, TPIDR_EL0);
2147 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2148
2149 /* Spin-lock:
2150
2151 The shared memory for the lock is at lockaddr. It will hold zero
2152 if no-one is holding the lock, otherwise it contains the address of
2153 the collecting_t object on the stack of the thread which acquired it.
2154
2155 At this stage, the stack pointer points to this thread's collecting_t
2156 object.
2157
2158 We use the following registers:
2159 - x0: Address of the lock.
2160 - x1: Pointer to collecting_t object.
2161 - x2: Scratch register.
2162
2163 MOV x0, #(lockaddr)
2164 ...
2165 MOV x1, sp
2166
2167 ; Trigger an event local to this core. So the following WFE
2168 ; instruction is ignored.
2169 SEVL
2170 again:
2171 ; Wait for an event. The event is triggered by either the SEVL
2172 ; or STLR instructions (store release).
2173 WFE
2174
2175 ; Atomically read at lockaddr. This marks the memory location as
2176 ; exclusive. This instruction also has memory constraints which
2177 ; make sure all previous data reads and writes are done before
2178 ; executing it.
2179 LDAXR x2, [x0]
2180
2181 ; Try again if another thread holds the lock.
2182 CBNZ x2, again
2183
2184 ; We can lock it! Write the address of the collecting_t object.
2185 ; This instruction will fail if the memory location is not marked
2186 ; as exclusive anymore. If it succeeds, it will remove the
2187 ; exclusive mark on the memory location. This way, if another
2188 ; thread executes this instruction before us, we will fail and try
2189 ; all over again.
2190 STXR w2, x1, [x0]
2191 CBNZ w2, again
2192
2193 */
2194
2195 p += emit_mov_addr (p, x0, lockaddr);
2196 p += emit_mov (p, x1, register_operand (sp));
2197
2198 p += emit_sevl (p);
2199 p += emit_wfe (p);
2200 p += emit_ldaxr (p, x2, x0);
2201 p += emit_cb (p, 1, w2, -2 * 4);
2202 p += emit_stxr (p, w2, x1, x0);
2203 p += emit_cb (p, 1, x2, -4 * 4);
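/* Editor's note (added): the hand-computed branch offsets above encode
   the "again" label.  -2 * 4 rewinds the first CBNZ two instructions,
   back to the WFE; -4 * 4 rewinds the second CBNZ four instructions,
   also back to the WFE.  */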
2204
2205 /* Call collector (struct tracepoint *, unsigned char *):
2206
2207 MOV x0, #(tpoint)
2208 ...
2209
2210 ; Saved registers start after the collecting_t object.
2211 ADD x1, sp, #16
2212
2213 ; We use an intra-procedure-call scratch register.
2214 MOV ip0, #(collector)
2215 ...
2216
2217 ; And call back to C!
2218 BLR ip0
2219
2220 */
2221
2222 p += emit_mov_addr (p, x0, tpoint);
2223 p += emit_add (p, x1, sp, immediate_operand (16));
2224
2225 p += emit_mov_addr (p, ip0, collector);
2226 p += emit_blr (p, ip0);
2227
2228 /* Release the lock.
2229
2230 MOV x0, #(lockaddr)
2231 ...
2232
2233 ; This instruction is a normal store with memory ordering
2234 ; constraints. Thanks to this we do not have to put a data
2235 ; barrier instruction to make sure all data read and writes are done
2236 ; before this instruction is executed. Furthermore, this instruction
2237 ; will trigger an event, letting other threads know they can grab
2238 ; the lock.
2239 STLR xzr, [x0]
2240
2241 */
2242 p += emit_mov_addr (p, x0, lockaddr);
2243 p += emit_stlr (p, xzr, x0);
2244
2245 /* Free collecting_t object:
2246
2247 ADD sp, sp, #16
2248
2249 */
2250 p += emit_add (p, sp, sp, immediate_operand (16));
2251
2252 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2253 registers from the stack.
2254
2255 LDR x2, [sp, #(2 * 16)]
2256 LDR x1, [sp, #(1 * 16)]
2257 LDR x0, [sp, #(0 * 16)]
2258
2259 MSR NZCV, x2
2260 MSR FPSR, x1
2261 MSR FPCR, x0
2262
2263 ADD sp, sp, #(5 * 16)
2264
2265 */
2266 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2267 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2268 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2269 p += emit_msr (p, NZCV, x2);
2270 p += emit_msr (p, FPSR, x1);
2271 p += emit_msr (p, FPCR, x0);
2272
2273 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2274
2275 /* Pop general purpose registers:
2276
2277 LDR x0, [sp]
2278 ...
2279 LDR x30, [sp, #(30 * 16)]
2280
2281 ADD sp, sp, #(31 * 16)
2282
2283 */
2284 for (i = 0; i <= 30; i += 1)
2285 p += emit_ldr (p, aarch64_register (i, 1), sp,
2286 offset_memory_operand (i * 16));
2287 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2288
2289 /* Pop SIMD&FP registers:
2290
2291 LDP q0, q1, [sp]
2292 ...
2293 LDP q30, q31, [sp, #(30 * 16)]
2294
2295 ADD sp, sp, #(32 * 16)
2296
2297 */
2298 for (i = 0; i <= 30; i += 2)
2299 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2300 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2301
2302 /* Write the code into the inferior memory. */
2303 append_insns (&buildaddr, p - buf, buf);
2304
2305 /* Now emit the relocated instruction. */
2306 *adjusted_insn_addr = buildaddr;
2307 target_read_uint32 (tpaddr, &insn);
2308
2309 insn_data.base.insn_addr = tpaddr;
2310 insn_data.new_addr = buildaddr;
2311 insn_data.insn_ptr = buf;
2312
2313 aarch64_relocate_instruction (insn, &visitor,
2314 (struct aarch64_insn_data *) &insn_data);
2315
2316 /* We may not have been able to relocate the instruction. */
2317 if (insn_data.insn_ptr == buf)
2318 {
2319 sprintf (err,
2320 "E.Could not relocate instruction from %s to %s.",
2321 core_addr_to_string_nz (tpaddr),
2322 core_addr_to_string_nz (buildaddr));
2323 return 1;
2324 }
2325 else
2326 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2327 *adjusted_insn_addr_end = buildaddr;
2328
2329 /* Go back to the start of the buffer. */
2330 p = buf;
2331
2332 /* Emit a branch back from the jump pad. */
2333 offset = (tpaddr + orig_size - buildaddr);
2334 if (!can_encode_int32 (offset, 28))
2335 {
2336 sprintf (err,
2337 "E.Jump back from jump pad too far from tracepoint "
2338 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2339 offset);
2340 return 1;
2341 }
2342
2343 p += emit_b (p, 0, offset);
2344 append_insns (&buildaddr, p - buf, buf);
2345
2346 /* Give the caller a branch instruction into the jump pad. */
2347 offset = (*jump_entry - tpaddr);
2348 if (!can_encode_int32 (offset, 28))
2349 {
2350 sprintf (err,
2351 "E.Jump pad too far from tracepoint "
2352 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2353 offset);
2354 return 1;
2355 }
2356
2357 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2358 *jjump_pad_insn_size = 4;
2359
2360 /* Return the end address of our pad. */
2361 *jump_entry = buildaddr;
2362
2363 return 0;
2364}
2365
2366/* Helper function writing LEN instructions from START into
2367 current_insn_ptr. */
2368
2369static void
2370emit_ops_insns (const uint32_t *start, int len)
2371{
2372 CORE_ADDR buildaddr = current_insn_ptr;
2373
2374 if (debug_threads)
2375 debug_printf ("Adding %d instructions at %s\n",
2376 len, paddress (buildaddr));
2377
2378 append_insns (&buildaddr, len, start);
2379 current_insn_ptr = buildaddr;
2380}
2381
2382/* Pop a register from the stack. */
2383
2384static int
2385emit_pop (uint32_t *buf, struct aarch64_register rt)
2386{
2387 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2388}
2389
2390/* Push a register on the stack. */
2391
2392static int
2393emit_push (uint32_t *buf, struct aarch64_register rt)
2394{
2395 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2396}
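/* Editor's note: each stack cell is 16 bytes even though only 8 are
   used, so SP keeps the 16-byte alignment AArch64 requires.  emit_push
   and emit_pop are exact inverses:

     p += emit_push (p, x0);  // STR x0, [sp, #-16]!
     p += emit_pop (p, x0);   // LDR x0, [sp], #16

 */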
2397
2398/* Implementation of emit_ops method "emit_prologue". */
2399
2400static void
2401aarch64_emit_prologue (void)
2402{
2403 uint32_t buf[16];
2404 uint32_t *p = buf;
2405
2406 /* This function emits a prologue for the following function prototype:
2407
2408 enum eval_result_type f (unsigned char *regs,
2409 ULONGEST *value);
2410
2411 The first argument is a buffer of raw registers. The second
2412 argument points to the result of evaluating the expression; it
2413 will be set to whatever is on top of the stack at the end of
2414 the evaluation.
2415
2416 The stack set up by the prologue is as such:
2417
2418 High *------------------------------------------------------*
2419 | LR |
2420 | FP | <- FP
2421 | x1 (ULONGEST *value) |
2422 | x0 (unsigned char *regs) |
2423 Low *------------------------------------------------------*
2424
2425 As we are implementing a stack machine, each opcode can expand the
2426 stack so we never know how far we are from the data saved by this
2427 prologue. In order to be able to refer to value and regs later, we save
2428 the current stack pointer in the frame pointer. This way, it is not
2429 clobbered when calling C functions.
2430
2431 Finally, throughout every operation, we are using register x0 as the
2432 top of the stack, and x1 as a scratch register. */
2433
2434 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2435 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2436 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2437
2438 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2439
2440
2441 emit_ops_insns (buf, p - buf);
2442}
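/* Editor's note (added): after this prologue FP points at the saved
   frame pointer cell, so "value" lives at FP - 8 and "regs" at
   FP - 16.  aarch64_emit_epilogue and aarch64_emit_reg below rely on
   exactly those offsets.  */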
2443
2444/* Implementation of emit_ops method "emit_epilogue". */
2445
2446static void
2447aarch64_emit_epilogue (void)
2448{
2449 uint32_t buf[16];
2450 uint32_t *p = buf;
2451
2452 /* Store the result of the expression (x0) in *value. */
2453 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2454 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2455 p += emit_str (p, x0, x1, offset_memory_operand (0));
2456
2457 /* Restore the previous state. */
2458 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2459 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2460
2461 /* Return expr_eval_no_error. */
2462 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2463 p += emit_ret (p, lr);
2464
2465 emit_ops_insns (buf, p - buf);
2466}
2467
2468/* Implementation of emit_ops method "emit_add". */
2469
2470static void
2471aarch64_emit_add (void)
2472{
2473 uint32_t buf[16];
2474 uint32_t *p = buf;
2475
2476 p += emit_pop (p, x1);
2477 p += emit_add (p, x0, x1, register_operand (x0));
2478
2479 emit_ops_insns (buf, p - buf);
2480}
2481
2482/* Implementation of emit_ops method "emit_sub". */
2483
2484static void
2485aarch64_emit_sub (void)
2486{
2487 uint32_t buf[16];
2488 uint32_t *p = buf;
2489
2490 p += emit_pop (p, x1);
2491 p += emit_sub (p, x0, x1, register_operand (x0));
2492
2493 emit_ops_insns (buf, p - buf);
2494}
2495
2496/* Implementation of emit_ops method "emit_mul". */
2497
2498static void
2499aarch64_emit_mul (void)
2500{
2501 uint32_t buf[16];
2502 uint32_t *p = buf;
2503
2504 p += emit_pop (p, x1);
2505 p += emit_mul (p, x0, x1, x0);
2506
2507 emit_ops_insns (buf, p - buf);
2508}
2509
2510/* Implementation of emit_ops method "emit_lsh". */
2511
2512static void
2513aarch64_emit_lsh (void)
2514{
2515 uint32_t buf[16];
2516 uint32_t *p = buf;
2517
2518 p += emit_pop (p, x1);
2519 p += emit_lslv (p, x0, x1, x0);
2520
2521 emit_ops_insns (buf, p - buf);
2522}
2523
2524/* Implementation of emit_ops method "emit_rsh_signed". */
2525
2526static void
2527aarch64_emit_rsh_signed (void)
2528{
2529 uint32_t buf[16];
2530 uint32_t *p = buf;
2531
2532 p += emit_pop (p, x1);
2533 p += emit_asrv (p, x0, x1, x0);
2534
2535 emit_ops_insns (buf, p - buf);
2536}
2537
2538/* Implementation of emit_ops method "emit_rsh_unsigned". */
2539
2540static void
2541aarch64_emit_rsh_unsigned (void)
2542{
2543 uint32_t buf[16];
2544 uint32_t *p = buf;
2545
2546 p += emit_pop (p, x1);
2547 p += emit_lsrv (p, x0, x1, x0);
2548
2549 emit_ops_insns (buf, p - buf);
2550}
2551
2552/* Implementation of emit_ops method "emit_ext". */
2553
2554static void
2555aarch64_emit_ext (int arg)
2556{
2557 uint32_t buf[16];
2558 uint32_t *p = buf;
2559
2560 p += emit_sbfx (p, x0, x0, 0, arg);
2561
2562 emit_ops_insns (buf, p - buf);
2563}
2564
2565/* Implementation of emit_ops method "emit_log_not". */
2566
2567static void
2568aarch64_emit_log_not (void)
2569{
2570 uint32_t buf[16];
2571 uint32_t *p = buf;
2572
2573 /* If the top of the stack is 0, replace it with 1. Else replace it with
2574 0. */
2575
2576 p += emit_cmp (p, x0, immediate_operand (0));
2577 p += emit_cset (p, x0, EQ);
2578
2579 emit_ops_insns (buf, p - buf);
2580}
2581
2582/* Implementation of emit_ops method "emit_bit_and". */
2583
2584static void
2585aarch64_emit_bit_and (void)
2586{
2587 uint32_t buf[16];
2588 uint32_t *p = buf;
2589
2590 p += emit_pop (p, x1);
2591 p += emit_and (p, x0, x0, x1);
2592
2593 emit_ops_insns (buf, p - buf);
2594}
2595
2596/* Implementation of emit_ops method "emit_bit_or". */
2597
2598static void
2599aarch64_emit_bit_or (void)
2600{
2601 uint32_t buf[16];
2602 uint32_t *p = buf;
2603
2604 p += emit_pop (p, x1);
2605 p += emit_orr (p, x0, x0, x1);
2606
2607 emit_ops_insns (buf, p - buf);
2608}
2609
2610/* Implementation of emit_ops method "emit_bit_xor". */
2611
2612static void
2613aarch64_emit_bit_xor (void)
2614{
2615 uint32_t buf[16];
2616 uint32_t *p = buf;
2617
2618 p += emit_pop (p, x1);
2619 p += emit_eor (p, x0, x0, x1);
2620
2621 emit_ops_insns (buf, p - buf);
2622}
2623
2624/* Implementation of emit_ops method "emit_bit_not". */
2625
2626static void
2627aarch64_emit_bit_not (void)
2628{
2629 uint32_t buf[16];
2630 uint32_t *p = buf;
2631
2632 p += emit_mvn (p, x0, x0);
2633
2634 emit_ops_insns (buf, p - buf);
2635}
2636
2637/* Implementation of emit_ops method "emit_equal". */
2638
2639static void
2640aarch64_emit_equal (void)
2641{
2642 uint32_t buf[16];
2643 uint32_t *p = buf;
2644
2645 p += emit_pop (p, x1);
2646 p += emit_cmp (p, x0, register_operand (x1));
2647 p += emit_cset (p, x0, EQ);
2648
2649 emit_ops_insns (buf, p - buf);
2650}
2651
2652/* Implementation of emit_ops method "emit_less_signed". */
2653
2654static void
2655aarch64_emit_less_signed (void)
2656{
2657 uint32_t buf[16];
2658 uint32_t *p = buf;
2659
2660 p += emit_pop (p, x1);
2661 p += emit_cmp (p, x1, register_operand (x0));
2662 p += emit_cset (p, x0, LT);
2663
2664 emit_ops_insns (buf, p - buf);
2665}
2666
2667/* Implementation of emit_ops method "emit_less_unsigned". */
2668
2669static void
2670aarch64_emit_less_unsigned (void)
2671{
2672 uint32_t buf[16];
2673 uint32_t *p = buf;
2674
2675 p += emit_pop (p, x1);
2676 p += emit_cmp (p, x1, register_operand (x0));
2677 p += emit_cset (p, x0, LO);
2678
2679 emit_ops_insns (buf, p - buf);
2680}
2681
2682/* Implementation of emit_ops method "emit_ref". */
2683
2684static void
2685aarch64_emit_ref (int size)
2686{
2687 uint32_t buf[16];
2688 uint32_t *p = buf;
2689
2690 switch (size)
2691 {
2692 case 1:
2693 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2694 break;
2695 case 2:
2696 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2697 break;
2698 case 4:
2699 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2700 break;
2701 case 8:
2702 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2703 break;
2704 default:
2705 /* Unknown size, bail on compilation. */
2706 emit_error = 1;
2707 break;
2708 }
2709
2710 emit_ops_insns (buf, p - buf);
2711}
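/* Editor's note (added): the 1-, 2- and 4-byte cases load through w0;
   writes to a W register zero-extend into the full X register, so the
   top of the stack always ends up holding a 64-bit value.  */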
2712
2713/* Implementation of emit_ops method "emit_if_goto". */
2714
2715static void
2716aarch64_emit_if_goto (int *offset_p, int *size_p)
2717{
2718 uint32_t buf[16];
2719 uint32_t *p = buf;
2720
2721 /* The Z flag is set or cleared here. */
2722 p += emit_cmp (p, x0, immediate_operand (0));
2723 /* This instruction must not change the Z flag. */
2724 p += emit_pop (p, x0);
2725 /* Branch over the next instruction if x0 == 0. */
2726 p += emit_bcond (p, EQ, 8);
2727
2728 /* The NOP instruction will be patched with an unconditional branch. */
2729 if (offset_p)
2730 *offset_p = (p - buf) * 4;
2731 if (size_p)
2732 *size_p = 4;
2733 p += emit_nop (p);
2734
2735 emit_ops_insns (buf, p - buf);
2736}
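/* Editor's note: a hedged sketch of the fix-up protocol (the caller
   bookkeeping shown here is hypothetical).  *OFFSET_P and *SIZE_P
   locate the NOP within the block just emitted; once the branch
   target is known, the caller patches it through
   aarch64_write_goto_address below:

     CORE_ADDR start = current_insn_ptr;
     int offset, size;
     aarch64_emit_if_goto (&offset, &size);
     // ... emit the bytecode for the taken arm ...
     aarch64_write_goto_address (start + offset, current_insn_ptr, size);

 */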
2737
2738/* Implementation of emit_ops method "emit_goto". */
2739
2740static void
2741aarch64_emit_goto (int *offset_p, int *size_p)
2742{
2743 uint32_t buf[16];
2744 uint32_t *p = buf;
2745
2746 /* The NOP instruction will be patched with an unconditional branch. */
2747 if (offset_p)
2748 *offset_p = 0;
2749 if (size_p)
2750 *size_p = 4;
2751 p += emit_nop (p);
2752
2753 emit_ops_insns (buf, p - buf);
2754}
2755
2756/* Implementation of emit_ops method "write_goto_address". */
2757
2758static void
2759aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2760{
2761 uint32_t insn;
2762
2763 emit_b (&insn, 0, to - from);
2764 append_insns (&from, 1, &insn);
2765}
2766
2767/* Implementation of emit_ops method "emit_const". */
2768
2769static void
2770aarch64_emit_const (LONGEST num)
2771{
2772 uint32_t buf[16];
2773 uint32_t *p = buf;
2774
2775 p += emit_mov_addr (p, x0, num);
2776
2777 emit_ops_insns (buf, p - buf);
2778}
2779
2780/* Implementation of emit_ops method "emit_call". */
2781
2782static void
2783aarch64_emit_call (CORE_ADDR fn)
2784{
2785 uint32_t buf[16];
2786 uint32_t *p = buf;
2787
2788 p += emit_mov_addr (p, ip0, fn);
2789 p += emit_blr (p, ip0);
2790
2791 emit_ops_insns (buf, p - buf);
2792}
2793
2794/* Implementation of emit_ops method "emit_reg". */
2795
2796static void
2797aarch64_emit_reg (int reg)
2798{
2799 uint32_t buf[16];
2800 uint32_t *p = buf;
2801
2802 /* Set x0 to unsigned char *regs. */
2803 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2804 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2805 p += emit_mov (p, x1, immediate_operand (reg));
2806
2807 emit_ops_insns (buf, p - buf);
2808
2809 aarch64_emit_call (get_raw_reg_func_addr ());
2810}
2811
2812/* Implementation of emit_ops method "emit_pop". */
2813
2814static void
2815aarch64_emit_pop (void)
2816{
2817 uint32_t buf[16];
2818 uint32_t *p = buf;
2819
2820 p += emit_pop (p, x0);
2821
2822 emit_ops_insns (buf, p - buf);
2823}
2824
2825/* Implementation of emit_ops method "emit_stack_flush". */
2826
2827static void
2828aarch64_emit_stack_flush (void)
2829{
2830 uint32_t buf[16];
2831 uint32_t *p = buf;
2832
2833 p += emit_push (p, x0);
2834
2835 emit_ops_insns (buf, p - buf);
2836}
2837
2838/* Implementation of emit_ops method "emit_zero_ext". */
2839
2840static void
2841aarch64_emit_zero_ext (int arg)
2842{
2843 uint32_t buf[16];
2844 uint32_t *p = buf;
2845
2846 p += emit_ubfx (p, x0, x0, 0, arg);
2847
2848 emit_ops_insns (buf, p - buf);
2849}
2850
2851/* Implementation of emit_ops method "emit_swap". */
2852
2853static void
2854aarch64_emit_swap (void)
2855{
2856 uint32_t buf[16];
2857 uint32_t *p = buf;
2858
2859 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2860 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2861 p += emit_mov (p, x0, register_operand (x1));
2862
2863 emit_ops_insns (buf, p - buf);
2864}
2865
2866/* Implementation of emit_ops method "emit_stack_adjust". */
2867
2868static void
2869aarch64_emit_stack_adjust (int n)
2870{
2871 /* This is not needed with our design. */
2872 uint32_t buf[16];
2873 uint32_t *p = buf;
2874
2875 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2876
2877 emit_ops_insns (buf, p - buf);
2878}
2879
2880/* Implementation of emit_ops method "emit_int_call_1". */
2881
2882static void
2883aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2884{
2885 uint32_t buf[16];
2886 uint32_t *p = buf;
2887
2888 p += emit_mov (p, x0, immediate_operand (arg1));
2889
2890 emit_ops_insns (buf, p - buf);
2891
2892 aarch64_emit_call (fn);
2893}
2894
2895/* Implementation of emit_ops method "emit_void_call_2". */
2896
2897static void
2898aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2899{
2900 uint32_t buf[16];
2901 uint32_t *p = buf;
2902
2903 /* Push x0 on the stack. */
2904 aarch64_emit_stack_flush ();
2905
2906 /* Setup arguments for the function call:
2907
2908 x0: arg1
2909 x1: top of the stack
2910
2911 MOV x1, x0
2912 MOV x0, #arg1 */
2913
2914 p += emit_mov (p, x1, register_operand (x0));
2915 p += emit_mov (p, x0, immediate_operand (arg1));
2916
2917 emit_ops_insns (buf, p - buf);
2918
2919 aarch64_emit_call (fn);
2920
2921 /* Restore x0. */
2922 aarch64_emit_pop ();
2923}
2924
2925/* Implementation of emit_ops method "emit_eq_goto". */
2926
2927static void
2928aarch64_emit_eq_goto (int *offset_p, int *size_p)
2929{
2930 uint32_t buf[16];
2931 uint32_t *p = buf;
2932
2933 p += emit_pop (p, x1);
2934 p += emit_cmp (p, x1, register_operand (x0));
2935 /* Branch over the next instruction if x0 != x1. */
2936 p += emit_bcond (p, NE, 8);
2937 /* The NOP instruction will be patched with an unconditional branch. */
2938 if (offset_p)
2939 *offset_p = (p - buf) * 4;
2940 if (size_p)
2941 *size_p = 4;
2942 p += emit_nop (p);
2943
2944 emit_ops_insns (buf, p - buf);
2945}
2946
2947/* Implementation of emit_ops method "emit_ne_goto". */
2948
2949static void
2950aarch64_emit_ne_goto (int *offset_p, int *size_p)
2951{
2952 uint32_t buf[16];
2953 uint32_t *p = buf;
2954
2955 p += emit_pop (p, x1);
2956 p += emit_cmp (p, x1, register_operand (x0));
2957 /* Branch over the next instruction if x0 == x1. */
2958 p += emit_bcond (p, EQ, 8);
2959 /* The NOP instruction will be patched with an unconditional branch. */
2960 if (offset_p)
2961 *offset_p = (p - buf) * 4;
2962 if (size_p)
2963 *size_p = 4;
2964 p += emit_nop (p);
2965
2966 emit_ops_insns (buf, p - buf);
2967}
2968
2969/* Implementation of emit_ops method "emit_lt_goto". */
2970
2971static void
2972aarch64_emit_lt_goto (int *offset_p, int *size_p)
2973{
2974 uint32_t buf[16];
2975 uint32_t *p = buf;
2976
2977 p += emit_pop (p, x1);
2978 p += emit_cmp (p, x1, register_operand (x0));
2979 /* Branch over the next instruction if x0 >= x1. */
2980 p += emit_bcond (p, GE, 8);
2981 /* The NOP instruction will be patched with an unconditional branch. */
2982 if (offset_p)
2983 *offset_p = (p - buf) * 4;
2984 if (size_p)
2985 *size_p = 4;
2986 p += emit_nop (p);
2987
2988 emit_ops_insns (buf, p - buf);
2989}
2990
2991/* Implementation of emit_ops method "emit_le_goto". */
2992
2993static void
2994aarch64_emit_le_goto (int *offset_p, int *size_p)
2995{
2996 uint32_t buf[16];
2997 uint32_t *p = buf;
2998
2999 p += emit_pop (p, x1);
3000 p += emit_cmp (p, x1, register_operand (x0));
3001 /* Branch over the next instruction if x0 > x1. */
3002 p += emit_bcond (p, GT, 8);
3003 /* The NOP instruction will be patched with an unconditional branch. */
3004 if (offset_p)
3005 *offset_p = (p - buf) * 4;
3006 if (size_p)
3007 *size_p = 4;
3008 p += emit_nop (p);
3009
3010 emit_ops_insns (buf, p - buf);
3011}
3012
3013/* Implementation of emit_ops method "emit_gt_goto". */
3014
3015static void
3016aarch64_emit_gt_goto (int *offset_p, int *size_p)
3017{
3018 uint32_t buf[16];
3019 uint32_t *p = buf;
3020
3021 p += emit_pop (p, x1);
3022 p += emit_cmp (p, x1, register_operand (x0));
3023 /* Branch over the next instruction if x0 <= x1. */
3024 p += emit_bcond (p, LE, 8);
3025 /* The NOP instruction will be patched with an unconditional branch. */
3026 if (offset_p)
3027 *offset_p = (p - buf) * 4;
3028 if (size_p)
3029 *size_p = 4;
3030 p += emit_nop (p);
3031
3032 emit_ops_insns (buf, p - buf);
3033}
3034
3035/* Implementation of emit_ops method "emit_ge_goto". */
3036
3037static void
3038aarch64_emit_ge_got (int *offset_p, int *size_p)
3039{
3040 uint32_t buf[16];
3041 uint32_t *p = buf;
3042
3043 p += emit_pop (p, x1);
3044 p += emit_cmp (p, x1, register_operand (x0));
3045 /* Branch over the next instruction if x0 < x1. */
3046 p += emit_bcond (p, LT, 8);
3047 /* The NOP instruction will be patched with an unconditional branch. */
3048 if (offset_p)
3049 *offset_p = (p - buf) * 4;
3050 if (size_p)
3051 *size_p = 4;
3052 p += emit_nop (p);
3053
3054 emit_ops_insns (buf, p - buf);
3055}
3056
3057static struct emit_ops aarch64_emit_ops_impl =
3058{
3059 aarch64_emit_prologue,
3060 aarch64_emit_epilogue,
3061 aarch64_emit_add,
3062 aarch64_emit_sub,
3063 aarch64_emit_mul,
3064 aarch64_emit_lsh,
3065 aarch64_emit_rsh_signed,
3066 aarch64_emit_rsh_unsigned,
3067 aarch64_emit_ext,
3068 aarch64_emit_log_not,
3069 aarch64_emit_bit_and,
3070 aarch64_emit_bit_or,
3071 aarch64_emit_bit_xor,
3072 aarch64_emit_bit_not,
3073 aarch64_emit_equal,
3074 aarch64_emit_less_signed,
3075 aarch64_emit_less_unsigned,
3076 aarch64_emit_ref,
3077 aarch64_emit_if_goto,
3078 aarch64_emit_goto,
3079 aarch64_write_goto_address,
3080 aarch64_emit_const,
3081 aarch64_emit_call,
3082 aarch64_emit_reg,
3083 aarch64_emit_pop,
3084 aarch64_emit_stack_flush,
3085 aarch64_emit_zero_ext,
3086 aarch64_emit_swap,
3087 aarch64_emit_stack_adjust,
3088 aarch64_emit_int_call_1,
3089 aarch64_emit_void_call_2,
3090 aarch64_emit_eq_goto,
3091 aarch64_emit_ne_goto,
3092 aarch64_emit_lt_goto,
3093 aarch64_emit_le_goto,
3094 aarch64_emit_gt_goto,
3095 aarch64_emit_ge_got,
3096};
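/* Editor's note (added): the initializers above are positional, so
   they must stay in the exact field order of struct emit_ops (see
   gdbserver's tracepoint.h).  aarch64_emit_ge_got keeps its historical
   spelling even though it implements the "emit_ge_goto" method.  */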
3097
3098/* Implementation of target ops method "emit_ops". */
3099
3100emit_ops *
3101aarch64_target::emit_ops ()
3102{
3103 return &aarch64_emit_ops_impl;
3104}
3105
3106/* Implementation of target ops method
3107 "get_min_fast_tracepoint_insn_len". */
3108
3109int
3110aarch64_target::get_min_fast_tracepoint_insn_len ()
3111{
3112 return 4;
3113}
3114
3115/* Implementation of linux target ops method "low_supports_range_stepping". */
3116
3117bool
3118aarch64_target::low_supports_range_stepping ()
3119{
3120 return true;
3121}
3122
3123/* Implementation of target ops method "sw_breakpoint_from_kind". */
3124
3125const gdb_byte *
3126aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3127{
3128 if (is_64bit_tdesc ())
3129 {
3130 *size = aarch64_breakpoint_len;
3131 return aarch64_breakpoint;
3132 }
3133 else
3134 return arm_sw_breakpoint_from_kind (kind, size);
3135}
3136
3137/* Implementation of target ops method "breakpoint_kind_from_pc". */
3138
3139int
3140aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3141{
3142 if (is_64bit_tdesc ())
3143 return aarch64_breakpoint_len;
3144 else
3145 return arm_breakpoint_kind_from_pc (pcptr);
3146}
3147
3148/* Implementation of the target ops method
3149 "breakpoint_kind_from_current_state". */
3150
3151int
3152aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3153{
3154 if (is_64bit_tdesc ())
3155 return aarch64_breakpoint_len;
3156 else
3157 return arm_breakpoint_kind_from_current_state (pcptr);
3158}
3159
3160struct linux_target_ops the_low_target =
3161{
3162 aarch64_get_syscall_trapinfo,
3163};
3164
3165/* The linux target ops object. */
3166
3167linux_process_target *the_linux_target = &the_aarch64_target;
3168
3169void
3170initialize_low_arch (void)
3171{
3172 initialize_low_arch_aarch32 ();
3173
3174 initialize_regsets_info (&aarch64_regsets_info);
3175 initialize_regsets_info (&aarch64_sve_regsets_info);
3176}