gdbserver/linux-low: turn 'supports_tracepoints' into a method
gdbserver/linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}
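
/* Implementation of linux target ops method "low_prepare_to_resume".  */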
void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache's target description contains the SVE
   feature (and hence the SVE registers).  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
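
/* Fill BUF, an NT_PRSTATUS regset buffer, with the general-purpose
   register values (X0-X30, SP, PC and CPSR) from REGCACHE.  */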
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
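
/* Store the general-purpose register values from the NT_PRSTATUS regset
   buffer BUF into REGCACHE.  */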
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
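
/* Fill BUF, an NT_FPREGSET regset buffer, with the FP/SIMD register
   values (V0-V31, FPSR and FPCR) from REGCACHE.  */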
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
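
/* Store the FP/SIMD register values from the NT_FPREGSET regset buffer
   BUF into REGCACHE.  */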
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers contained in BUF into REGCACHE.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
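
/* The bytes above are 0xd4200000 (BRK #0) stored little-endian;
   instruction encodings are always little-endian on AArch64,
   regardless of data endianness (see append_insns below).  */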

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
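
/* Initialize STATE, clearing the address, control and reference-count
   mirrors for all hardware breakpoint and watchpoint registers.  */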
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */, state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
	                   |---- range watched ----|
	           |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}
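
/* Implementations of linux target ops methods "low_new_thread" and
   "low_delete_thread"; both delegate to the shared nat/aarch64-linux
   code.  */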
void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     would inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};
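
/* The kinds of operand understood by the emit_* helper functions
   below.  */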
enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
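
/* Helper function emitting a load or store pair instruction (LDP/STP),
   using the addressing mode described by OPERAND.  Returns the number
   of 32-bit instructions written, or 0 if OPERAND's type is not
   supported.  */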
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits << 0).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the value to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
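
/* As a sketch of how the emitters above compose (the TO and ADDR
   variables here are hypothetical): each emit_* helper writes one or
   more instructions into a local buffer and returns the number of
   32-bit words written, so a sequence can be accumulated and then
   copied into the inferior with append_insns:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
     p += emit_mov_addr (p, x0, addr);
     p += emit_ldp (p, x0, x1, sp, postindex_memory_operand (16));
     append_insns (&to, p - buf, buf);  */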

/* Sub-class of struct aarch64_insn_data that stores information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR, saving the
   relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
0badd99f
YQ
1801/* Implementation of aarch64_insn_visitor method "cb". */
1802
1803static void
1804aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1805 const unsigned rn, int is64,
1806 struct aarch64_insn_data *data)
1807{
1808 struct aarch64_insn_relocation_data *insn_reloc
1809 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1810 int64_t new_offset
0badd99f
YQ
1811 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1812
1813 if (can_encode_int32 (new_offset, 21))
1814 {
1815 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1816 aarch64_register (rn, is64), new_offset);
bb903df0 1817 }
0badd99f 1818 else if (can_encode_int32 (new_offset, 28))
bb903df0 1819 {
0badd99f
YQ
1820 /* The offset is out of range for a compare and branch
1821 instruction but not for a unconditional branch. We can use
1822 the following instructions instead:
1823
1824 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1825 B NOT_TAKEN ; Else jump over TAKEN and continue.
1826 TAKEN:
1827 B #(offset - 8)
1828 NOT_TAKEN:
1829
1830 */
1831 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1832 aarch64_register (rn, is64), 8);
1833 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1834 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1835 }
1836}
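
/* Editor's note (not in the original source): CBZ/CBNZ use the same
   signed 19-bit instruction offset as B.cond, so the same 21-bit
   byte-offset limit (+/-1 MiB) and the same unconditional-branch
   fallback apply here.  */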

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
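
/* Editor's note (not in the original source): TBZ/TBNZ encode only a
   signed 14-bit instruction offset, hence the tighter 16-bit
   byte-offset check above (+/-32 KiB) before falling back to the
   unconditional-branch trampoline.  */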

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
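
/* Editor's note (not in the original source): this visitor table is
   consumed by aarch64_relocate_instruction, which decodes a single
   instruction and dispatches to the matching method above; see its use
   in aarch64_install_fast_tracepoint_jump_pad below.  */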

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                | struct tracepoint *       |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
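
/* Editor's note (not in the original source): in C-like pseudocode the
   exclusive-monitor loop emitted above behaves roughly as follows,
   where LOCK is the word at lockaddr and SELF is this thread's
   collecting_t pointer (all names are illustrative only):

     for (;;)
       {
         wfe ();                                  // wait for SEVL/STLR event
         if (load_acquire_exclusive (&LOCK) != 0) // LDAXR
           continue;                              // lock held; try again
         if (store_exclusive (&LOCK, SELF))       // STXR; succeeds only if
           break;                                 // the monitor is still ours
       }
   */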

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
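
/* Editor's note (not in the original source): each stack slot is 16
   bytes even though only 8 are used, because AArch64 requires sp to
   stay 16-byte aligned whenever it is used as the base of a memory
   access.  aarch64_emit_stack_adjust below relies on the same 16-byte
   slot size.  */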

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
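
/* Editor's note (not in the original source): the goto emitters above
   leave a NOP placeholder and report its location via *offset_p and
   *size_p; once the destination is known, the generic bytecode compiler
   patches that NOP through the write_goto_address hook.  A minimal
   sketch with illustrative names:

     int offset, size;
     CORE_ADDR start = current_insn_ptr;

     aarch64_emit_goto (&offset, &size);   // emits a NOP for now
     // ... later, when DEST is known:
     aarch64_write_goto_address (start + offset, dest, size);  */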

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
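
/* Editor's note (not in the original source): four bytes is the size of
   every AArch64 instruction, and installing a fast tracepoint only
   overwrites a single B into the jump pad (see *jjump_pad_insn_size = 4
   above), so one instruction's worth of space is enough.  */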

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}