/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache's target description contains the SVE
   feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to the regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

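/* For reference: read as a little-endian 32-bit word, the bytes above
   form 0xd4200000, which is the encoding of "BRK #0".  */
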
/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret
      = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				   state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
	     |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

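/* Note: the NT_ARM_TLS register set read above contains TPIDR_EL0, the
   software thread ID register, which is the thread area base that
   libthread_db expects.  */
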
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

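/* Note: the Linux syscall conventions pass the syscall number in X8 for
   AArch64 tasks and in R7 for 32-bit ARM (EABI) tasks, hence the two
   register names used above.  */
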
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

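/* For example, NZCV above packs the (op0, op1, CRn, CRm, op2) tuple into
   a single 15-bit field; the leading 0x1 appears to be the low bit of
   op0 as encoded by MRS/MSR (op0 = 0b1x, i.e. 3), so NZCV corresponds to
   the system register S3_3_C4_C2_0.  */
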
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

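/* Note on ENCODE, used throughout the emit functions below: as defined
   in arch/aarch64-insn.h, ENCODE (val, size, offset) places the low
   SIZE bits of VAL at bit position OFFSET of the instruction word.  In
   emit_blr above, ENCODE (rn.num, 5, 5) thus fills the 5-bit Rn field
   at bits 5-9.  */
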
/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

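/* A typical use of the pair above is saving and restoring the frame
   pointer and link register around a call, e.g.:

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));
     ...
     p += emit_ldp (p, fp, lr, sp, postindex_memory_operand (16));

   which assembles to "stp x29, x30, [sp, #-16]!" and
   "ldp x29, x30, [sp], #16".  */
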
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

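/* For instance, for ADDR == 0x0000aaaade042710 the function above emits:

     MOV  xd, #0x2710
     MOVK xd, #0xde04, lsl #16
     MOVK xd, #0xaaaa, lsl #32

   and stops there, since bits 48-63 of the address are zero.  */
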
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

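/* For example, emit_sbfx (p, x0, x0, 0, 8) sign-extends the low byte of
   X0 into the full register, the equivalent of "sxtb x0, w0".  */
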
/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

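/* For example, following an emit_cmp, emit_cset (p, x0, EQ) writes 1 to
   X0 if the compared values were equal and 0 otherwise.  */
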
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, which stores information about
   instruction relocation for fast tracepoints.  The visitor can relocate
   an instruction from BASE.INSN_ADDR to NEW_ADDR and save the relocated
   instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

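/* The widths passed to can_encode_int32 in these visitors come from the
   ISA: B/BL hold a 26-bit word offset (a 28-bit byte offset, +/-128 MiB),
   B.cond and CBZ/CBNZ hold 19 bits (a 21-bit byte offset), and TBZ/TBNZ
   hold 14 bits (a 16-bit byte offset).  */
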
1728/* Implementation of aarch64_insn_visitor method "b_cond". */
1729
1730static void
1731aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1732 struct aarch64_insn_data *data)
1733{
1734 struct aarch64_insn_relocation_data *insn_reloc
1735 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1736 int64_t new_offset
0badd99f
YQ
1737 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1738
1739 if (can_encode_int32 (new_offset, 21))
1740 {
1741 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1742 new_offset);
bb903df0 1743 }
0badd99f 1744 else if (can_encode_int32 (new_offset, 28))
bb903df0 1745 {
0badd99f
YQ
1746 /* The offset is out of range for a conditional branch
1747 instruction but not for a unconditional branch. We can use
1748 the following instructions instead:
bb903df0 1749
0badd99f
YQ
1750 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1751 B NOT_TAKEN ; Else jump over TAKEN and continue.
1752 TAKEN:
1753 B #(offset - 8)
1754 NOT_TAKEN:
1755
1756 */
1757
1758 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1759 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1760 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1761 }
0badd99f 1762}
bb903df0 1763
0badd99f
YQ
1764/* Implementation of aarch64_insn_visitor method "cb". */
1765
1766static void
1767aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1768 const unsigned rn, int is64,
1769 struct aarch64_insn_data *data)
1770{
1771 struct aarch64_insn_relocation_data *insn_reloc
1772 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1773 int64_t new_offset
0badd99f
YQ
1774 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1775
1776 if (can_encode_int32 (new_offset, 21))
1777 {
1778 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1779 aarch64_register (rn, is64), new_offset);
bb903df0 1780 }
0badd99f 1781 else if (can_encode_int32 (new_offset, 28))
bb903df0 1782 {
0badd99f
YQ
1783 /* The offset is out of range for a compare and branch
1784 instruction but not for a unconditional branch. We can use
1785 the following instructions instead:
1786
1787 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1788 B NOT_TAKEN ; Else jump over TAKEN and continue.
1789 TAKEN:
1790 B #(offset - 8)
1791 NOT_TAKEN:
1792
1793 */
1794 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1795 aarch64_register (rn, is64), 8);
1796 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1797 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1798 }
1799}
bb903df0 1800
0badd99f 1801/* Implementation of aarch64_insn_visitor method "tb". */
bb903df0 1802
0badd99f
YQ
1803static void
1804aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1805 const unsigned rt, unsigned bit,
1806 struct aarch64_insn_data *data)
1807{
1808 struct aarch64_insn_relocation_data *insn_reloc
1809 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1810 int64_t new_offset
0badd99f
YQ
1811 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1812
1813 if (can_encode_int32 (new_offset, 16))
1814 {
1815 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1816 aarch64_register (rt, 1), new_offset);
bb903df0 1817 }
0badd99f 1818 else if (can_encode_int32 (new_offset, 28))
bb903df0 1819 {
1820 /* The offset is out of range for a test bit and branch
 1821	       instruction but not for an unconditional branch.  We can use
1822 the following instructions instead:
1823
 1824	     TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
1825 B NOT_TAKEN ; Else jump over TAKEN and continue.
1826 TAKEN:
1827 B #(offset - 8)
1828 NOT_TAKEN:
1829
1830 */
1831 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1832 aarch64_register (rt, 1), 8);
1833 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1834 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1835 new_offset - 8);
1836 }
1837}
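
/* The bit counts passed to can_encode_int32 in the visitors above
   match the byte-offset reach of each AArch64 branch form: B/BL take
   a 26-bit word immediate (28 bits of bytes, +/-128 MiB), B.COND and
   CBZ/CBNZ a 19-bit word immediate (21 bits, +/-1 MiB), and TBZ/TBNZ
   a 14-bit word immediate (16 bits, +/-32 KiB).  */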
bb903df0 1838
0badd99f 1839/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1840
1841static void
1842aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1843 const int is_adrp,
1844 struct aarch64_insn_data *data)
1845{
1846 struct aarch64_insn_relocation_data *insn_reloc
1847 = (struct aarch64_insn_relocation_data *) data;
1848 /* We know exactly the address the ADR{P,} instruction will compute.
1849 We can just write it to the destination register. */
1850 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1851
1852 if (is_adrp)
1853 {
1854 /* Clear the lower 12 bits of the offset to get the 4K page. */
1855 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1856 aarch64_register (rd, 1),
1857 address & ~0xfff);
1858 }
1859 else
1860 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1861 aarch64_register (rd, 1), address);
1862}
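
/* A minimal sketch (hypothetical helper) mirroring the ADRP case
   above: the value written to the destination register is the
   computed target address with its low 12 bits cleared, i.e. the base
   of its 4 KiB page.  */

static CORE_ADDR
example_adrp_value (CORE_ADDR insn_addr, int32_t offset)
{
  return (insn_addr + offset) & ~(CORE_ADDR) 0xfff;
}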
bb903df0 1863
0badd99f 1864/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1865
1866static void
1867aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1868 const unsigned rt, const int is64,
1869 struct aarch64_insn_data *data)
1870{
1871 struct aarch64_insn_relocation_data *insn_reloc
1872 = (struct aarch64_insn_relocation_data *) data;
1873 CORE_ADDR address = data->insn_addr + offset;
1874
1875 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1876 aarch64_register (rt, 1), address);
1877
1878 /* We know exactly what address to load from, and what register we
1879 can use:
1880
1881 MOV xd, #(oldloc + offset)
1882 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1883 ...
1884
1885 LDR xd, [xd] ; or LDRSW xd, [xd]
1886
1887 */
1888
1889 if (is_sw)
1890 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1891 aarch64_register (rt, 1),
1892 aarch64_register (rt, 1),
1893 offset_memory_operand (0));
bb903df0 1894 else
1895 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1896 aarch64_register (rt, is64),
1897 aarch64_register (rt, 1),
1898 offset_memory_operand (0));
1899}
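
/* The "..." in the pseudo-code above stands for the extra MOVK
   instructions that emit_mov_addr may produce: a 64-bit address is
   materialized 16 bits at a time, as a MOV (wide immediate) followed
   by up to three MOVKs.  */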
1900
1901/* Implementation of aarch64_insn_visitor method "others". */
1902
1903static void
1904aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1905 struct aarch64_insn_data *data)
1906{
1907 struct aarch64_insn_relocation_data *insn_reloc
1908 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1909
1910 /* The instruction is not PC relative. Just re-emit it at the new
1911 location. */
e1c587c3 1912 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1913}
1914
1915static const struct aarch64_insn_visitor visitor =
1916{
1917 aarch64_ftrace_insn_reloc_b,
1918 aarch64_ftrace_insn_reloc_b_cond,
1919 aarch64_ftrace_insn_reloc_cb,
1920 aarch64_ftrace_insn_reloc_tb,
1921 aarch64_ftrace_insn_reloc_adr,
1922 aarch64_ftrace_insn_reloc_ldr_literal,
1923 aarch64_ftrace_insn_reloc_others,
1924};
1925
1926/* Implementation of linux_target_ops method
1927 "install_fast_tracepoint_jump_pad". */
1928
1929static int
1930aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1931 CORE_ADDR tpaddr,
1932 CORE_ADDR collector,
1933 CORE_ADDR lockaddr,
1934 ULONGEST orig_size,
1935 CORE_ADDR *jump_entry,
1936 CORE_ADDR *trampoline,
1937 ULONGEST *trampoline_size,
1938 unsigned char *jjump_pad_insn,
1939 ULONGEST *jjump_pad_insn_size,
1940 CORE_ADDR *adjusted_insn_addr,
1941 CORE_ADDR *adjusted_insn_addr_end,
1942 char *err)
1943{
1944 uint32_t buf[256];
1945 uint32_t *p = buf;
2ac09a5b 1946 int64_t offset;
bb903df0 1947 int i;
70b439f0 1948 uint32_t insn;
bb903df0 1949 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1950 struct aarch64_insn_relocation_data insn_data;
1951
1952 /* We need to save the current state on the stack both to restore it
1953 later and to collect register values when the tracepoint is hit.
1954
1955 The saved registers are pushed in a layout that needs to be in sync
1956 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1957 the supply_fast_tracepoint_registers function will fill in the
1958 register cache from a pointer to saved registers on the stack we build
1959 here.
1960
1961 For simplicity, we set the size of each cell on the stack to 16 bytes.
1962 This way one cell can hold any register type, from system registers
 1963	     to the 128-bit SIMD&FP registers.  Furthermore, the stack pointer
 1964	     has to be 16-byte aligned anyway.
1965
1966 Note that the CPSR register does not exist on AArch64. Instead we
1967 can access system bits describing the process state with the
1968 MRS/MSR instructions, namely the condition flags. We save them as
1969 if they are part of a CPSR register because that's how GDB
1970 interprets these system bits. At the moment, only the condition
1971 flags are saved in CPSR (NZCV).
1972
1973 Stack layout, each cell is 16 bytes (descending):
1974
1975 High *-------- SIMD&FP registers from 31 down to 0. --------*
1976 | q31 |
1977 . .
1978 . . 32 cells
1979 . .
1980 | q0 |
1981 *---- General purpose registers from 30 down to 0. ----*
1982 | x30 |
1983 . .
1984 . . 31 cells
1985 . .
1986 | x0 |
1987 *------------- Special purpose registers. -------------*
1988 | SP |
1989 | PC |
1990 | CPSR (NZCV) | 5 cells
1991 | FPSR |
1992 | FPCR | <- SP + 16
1993 *------------- collecting_t object --------------------*
1994 | TPIDR_EL0 | struct tracepoint * |
1995 Low *------------------------------------------------------*
1996
1997 After this stack is set up, we issue a call to the collector, passing
1998 it the saved registers at (SP + 16). */
1999
2000 /* Push SIMD&FP registers on the stack:
2001
2002 SUB sp, sp, #(32 * 16)
2003
2004 STP q30, q31, [sp, #(30 * 16)]
2005 ...
2006 STP q0, q1, [sp]
2007
2008 */
2009 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2010 for (i = 30; i >= 0; i -= 2)
2011 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2012
30baf67b 2013 /* Push general purpose registers on the stack. Note that we do not need
2014 to push x31 as it represents the xzr register and not the stack
2015 pointer in a STR instruction.
2016
2017 SUB sp, sp, #(31 * 16)
2018
2019 STR x30, [sp, #(30 * 16)]
2020 ...
2021 STR x0, [sp]
2022
2023 */
2024 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2025 for (i = 30; i >= 0; i -= 1)
2026 p += emit_str (p, aarch64_register (i, 1), sp,
2027 offset_memory_operand (i * 16));
2028
2029 /* Make space for 5 more cells.
2030
2031 SUB sp, sp, #(5 * 16)
2032
2033 */
2034 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2035
2036
2037 /* Save SP:
2038
2039 ADD x4, sp, #((32 + 31 + 5) * 16)
2040 STR x4, [sp, #(4 * 16)]
2041
2042 */
2043 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2044 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2045
2046 /* Save PC (tracepoint address):
2047
2048 MOV x3, #(tpaddr)
2049 ...
2050
2051 STR x3, [sp, #(3 * 16)]
2052
2053 */
2054
2055 p += emit_mov_addr (p, x3, tpaddr);
2056 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2057
2058 /* Save CPSR (NZCV), FPSR and FPCR:
2059
2060 MRS x2, nzcv
2061 MRS x1, fpsr
2062 MRS x0, fpcr
2063
2064 STR x2, [sp, #(2 * 16)]
2065 STR x1, [sp, #(1 * 16)]
2066 STR x0, [sp, #(0 * 16)]
2067
2068 */
2069 p += emit_mrs (p, x2, NZCV);
2070 p += emit_mrs (p, x1, FPSR);
2071 p += emit_mrs (p, x0, FPCR);
2072 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2073 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2074 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2075
 2076	  /* Push the collecting_t object.  It consists of the address of the
2077 tracepoint and an ID for the current thread. We get the latter by
2078 reading the tpidr_el0 system register. It corresponds to the
2079 NT_ARM_TLS register accessible with ptrace.
2080
2081 MOV x0, #(tpoint)
2082 ...
2083
2084 MRS x1, tpidr_el0
2085
2086 STP x0, x1, [sp, #-16]!
2087
2088 */
2089
2090 p += emit_mov_addr (p, x0, tpoint);
2091 p += emit_mrs (p, x1, TPIDR_EL0);
2092 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2093
2094 /* Spin-lock:
2095
2096 The shared memory for the lock is at lockaddr. It will hold zero
2097 if no-one is holding the lock, otherwise it contains the address of
2098 the collecting_t object on the stack of the thread which acquired it.
2099
2100 At this stage, the stack pointer points to this thread's collecting_t
2101 object.
2102
2103 We use the following registers:
2104 - x0: Address of the lock.
2105 - x1: Pointer to collecting_t object.
2106 - x2: Scratch register.
2107
2108 MOV x0, #(lockaddr)
2109 ...
2110 MOV x1, sp
2111
 2112	     ; Trigger an event local to this core, so the following WFE
 2113	     ; instruction returns immediately instead of waiting.
2114 SEVL
2115 again:
2116 ; Wait for an event. The event is triggered by either the SEVL
2117 ; or STLR instructions (store release).
2118 WFE
2119
2120 ; Atomically read at lockaddr. This marks the memory location as
2121 ; exclusive. This instruction also has memory constraints which
2122 ; make sure all previous data reads and writes are done before
2123 ; executing it.
2124 LDAXR x2, [x0]
2125
2126 ; Try again if another thread holds the lock.
2127 CBNZ x2, again
2128
2129 ; We can lock it! Write the address of the collecting_t object.
2130 ; This instruction will fail if the memory location is not marked
2131 ; as exclusive anymore. If it succeeds, it will remove the
2132 ; exclusive mark on the memory location. This way, if another
2133 ; thread executes this instruction before us, we will fail and try
2134 ; all over again.
2135 STXR w2, x1, [x0]
2136 CBNZ w2, again
2137
2138 */
2139
2140 p += emit_mov_addr (p, x0, lockaddr);
2141 p += emit_mov (p, x1, register_operand (sp));
2142
2143 p += emit_sevl (p);
2144 p += emit_wfe (p);
2145 p += emit_ldaxr (p, x2, x0);
2146 p += emit_cb (p, 1, w2, -2 * 4);
2147 p += emit_stxr (p, w2, x1, x0);
2148 p += emit_cb (p, 1, x2, -4 * 4);
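
/* A rough C rendition of the lock protocol emitted above and released
   further down -- a sketch for clarity only, assuming GCC-style
   __atomic builtins; the jump pad of course runs the actual
   LDAXR/STXR sequence.  LOCK is the word at lockaddr and ID the
   address of this thread's collecting_t object.  */

static void
example_lock (volatile unsigned long *lock, unsigned long id)
{
  unsigned long expected;

  do
    {
      /* LDAXR + first CBNZ: wait until the lock word reads as
	 zero.  */
      expected = 0;
    }
  /* STXR + second CBNZ: attempt the exclusive store and retry on
     failure.  */
  while (!__atomic_compare_exchange_n (lock, &expected, id, 0,
				       __ATOMIC_ACQUIRE,
				       __ATOMIC_RELAXED));
}

static void
example_unlock (volatile unsigned long *lock)
{
  /* Matches the later "STLR xzr, [x0]": a release store of zero that
     also wakes the WFE waiters.  */
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
}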
2149
2150 /* Call collector (struct tracepoint *, unsigned char *):
2151
2152 MOV x0, #(tpoint)
2153 ...
2154
2155 ; Saved registers start after the collecting_t object.
2156 ADD x1, sp, #16
2157
2158 ; We use an intra-procedure-call scratch register.
2159 MOV ip0, #(collector)
2160 ...
2161
2162 ; And call back to C!
2163 BLR ip0
2164
2165 */
2166
2167 p += emit_mov_addr (p, x0, tpoint);
2168 p += emit_add (p, x1, sp, immediate_operand (16));
2169
2170 p += emit_mov_addr (p, ip0, collector);
2171 p += emit_blr (p, ip0);
2172
2173 /* Release the lock.
2174
2175 MOV x0, #(lockaddr)
2176 ...
2177
2178 ; This instruction is a normal store with memory ordering
2179 ; constraints. Thanks to this we do not have to put a data
2180 ; barrier instruction to make sure all data read and writes are done
30baf67b 2181 ; before this instruction is executed. Furthermore, this instruction
2182 ; will trigger an event, letting other threads know they can grab
2183 ; the lock.
2184 STLR xzr, [x0]
2185
2186 */
2187 p += emit_mov_addr (p, x0, lockaddr);
2188 p += emit_stlr (p, xzr, x0);
2189
2190 /* Free collecting_t object:
2191
2192 ADD sp, sp, #16
2193
2194 */
2195 p += emit_add (p, sp, sp, immediate_operand (16));
2196
2197 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2198 registers from the stack.
2199
2200 LDR x2, [sp, #(2 * 16)]
2201 LDR x1, [sp, #(1 * 16)]
2202 LDR x0, [sp, #(0 * 16)]
2203
2204 MSR NZCV, x2
2205 MSR FPSR, x1
2206 MSR FPCR, x0
2207
 2208	     ADD sp, sp, #(5 * 16)
2209
2210 */
2211 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2212 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2213 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2214 p += emit_msr (p, NZCV, x2);
2215 p += emit_msr (p, FPSR, x1);
2216 p += emit_msr (p, FPCR, x0);
2217
2218 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2219
2220 /* Pop general purpose registers:
2221
2222 LDR x0, [sp]
2223 ...
2224 LDR x30, [sp, #(30 * 16)]
2225
2226 ADD sp, sp, #(31 * 16)
2227
2228 */
2229 for (i = 0; i <= 30; i += 1)
2230 p += emit_ldr (p, aarch64_register (i, 1), sp,
2231 offset_memory_operand (i * 16));
2232 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2233
2234 /* Pop SIMD&FP registers:
2235
2236 LDP q0, q1, [sp]
2237 ...
2238 LDP q30, q31, [sp, #(30 * 16)]
2239
2240 ADD sp, sp, #(32 * 16)
2241
2242 */
2243 for (i = 0; i <= 30; i += 2)
2244 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2245 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2246
2247 /* Write the code into the inferior memory. */
2248 append_insns (&buildaddr, p - buf, buf);
2249
2250 /* Now emit the relocated instruction. */
2251 *adjusted_insn_addr = buildaddr;
70b439f0 2252 target_read_uint32 (tpaddr, &insn);
2253
2254 insn_data.base.insn_addr = tpaddr;
2255 insn_data.new_addr = buildaddr;
2256 insn_data.insn_ptr = buf;
2257
2258 aarch64_relocate_instruction (insn, &visitor,
2259 (struct aarch64_insn_data *) &insn_data);
2260
bb903df0 2261 /* We may not have been able to relocate the instruction. */
0badd99f 2262 if (insn_data.insn_ptr == buf)
2263 {
2264 sprintf (err,
2265 "E.Could not relocate instruction from %s to %s.",
2266 core_addr_to_string_nz (tpaddr),
2267 core_addr_to_string_nz (buildaddr));
2268 return 1;
2269 }
dfaffe9d 2270 else
0badd99f 2271 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2272 *adjusted_insn_addr_end = buildaddr;
2273
2274 /* Go back to the start of the buffer. */
2275 p = buf;
2276
2277 /* Emit a branch back from the jump pad. */
2278 offset = (tpaddr + orig_size - buildaddr);
2279 if (!can_encode_int32 (offset, 28))
2280 {
2281 sprintf (err,
2282 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2283 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2284 offset);
2285 return 1;
2286 }
2287
2288 p += emit_b (p, 0, offset);
2289 append_insns (&buildaddr, p - buf, buf);
2290
2291 /* Give the caller a branch instruction into the jump pad. */
2292 offset = (*jump_entry - tpaddr);
2293 if (!can_encode_int32 (offset, 28))
2294 {
2295 sprintf (err,
2296 "E.Jump pad too far from tracepoint "
2ac09a5b 2297 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2298 offset);
2299 return 1;
2300 }
2301
2302 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2303 *jjump_pad_insn_size = 4;
2304
2305 /* Return the end address of our pad. */
2306 *jump_entry = buildaddr;
2307
2308 return 0;
2309}
2310
2311/* Helper function writing LEN instructions from START into
2312 current_insn_ptr. */
2313
2314static void
2315emit_ops_insns (const uint32_t *start, int len)
2316{
2317 CORE_ADDR buildaddr = current_insn_ptr;
2318
2319 if (debug_threads)
 2320	    debug_printf ("Adding %d instructions at %s\n",
2321 len, paddress (buildaddr));
2322
2323 append_insns (&buildaddr, len, start);
2324 current_insn_ptr = buildaddr;
2325}
2326
2327/* Pop a register from the stack. */
2328
2329static int
2330emit_pop (uint32_t *buf, struct aarch64_register rt)
2331{
2332 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2333}
2334
2335/* Push a register on the stack. */
2336
2337static int
2338emit_push (uint32_t *buf, struct aarch64_register rt)
2339{
2340 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2341}
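
/* Each stack-machine cell is a full 16 bytes even though only the low
   8 hold the value, so that SP keeps the 16-byte alignment AArch64
   requires for accesses through the stack pointer.  */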
2342
2343/* Implementation of emit_ops method "emit_prologue". */
2344
2345static void
2346aarch64_emit_prologue (void)
2347{
2348 uint32_t buf[16];
2349 uint32_t *p = buf;
2350
 2351	  /* This function emits a prologue for the following function prototype:
2352
2353 enum eval_result_type f (unsigned char *regs,
2354 ULONGEST *value);
2355
 2356	     The first argument is a buffer of raw registers.  The second
 2357	     argument points to the result of evaluating the expression; it is
 2358	     set to whatever is on top of the stack at the end.
 2359	
 2360	     The stack set up by the prologue is as follows:
2362
2363 High *------------------------------------------------------*
2364 | LR |
2365 | FP | <- FP
2366 | x1 (ULONGEST *value) |
2367 | x0 (unsigned char *regs) |
2368 Low *------------------------------------------------------*
2369
2370 As we are implementing a stack machine, each opcode can expand the
2371 stack so we never know how far we are from the data saved by this
 2372	     prologue.  In order to be able to refer to value and regs later, we save
2373 the current stack pointer in the frame pointer. This way, it is not
2374 clobbered when calling C functions.
2375
30baf67b 2376 Finally, throughout every operation, we are using register x0 as the
2377 top of the stack, and x1 as a scratch register. */
2378
2379 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2380 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2381 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2382
2383 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2384
2385
2386 emit_ops_insns (buf, p - buf);
2387}
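
/* For instance (cf. aarch64_emit_reg and aarch64_emit_epilogue), REGS
   is later reloaded with, in effect, "LDR x0, [FP - 16]" and VALUE
   with "LDR x1, [FP - 8]", whatever the current stack depth.  */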
2388
2389/* Implementation of emit_ops method "emit_epilogue". */
2390
2391static void
2392aarch64_emit_epilogue (void)
2393{
2394 uint32_t buf[16];
2395 uint32_t *p = buf;
2396
2397 /* Store the result of the expression (x0) in *value. */
2398 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2399 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2400 p += emit_str (p, x0, x1, offset_memory_operand (0));
2401
2402 /* Restore the previous state. */
2403 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2404 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2405
2406 /* Return expr_eval_no_error. */
2407 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2408 p += emit_ret (p, lr);
2409
2410 emit_ops_insns (buf, p - buf);
2411}
2412
2413/* Implementation of emit_ops method "emit_add". */
2414
2415static void
2416aarch64_emit_add (void)
2417{
2418 uint32_t buf[16];
2419 uint32_t *p = buf;
2420
2421 p += emit_pop (p, x1);
45e3745e 2422 p += emit_add (p, x0, x1, register_operand (x0));
2423
2424 emit_ops_insns (buf, p - buf);
2425}
2426
2427/* Implementation of emit_ops method "emit_sub". */
2428
2429static void
2430aarch64_emit_sub (void)
2431{
2432 uint32_t buf[16];
2433 uint32_t *p = buf;
2434
2435 p += emit_pop (p, x1);
45e3745e 2436 p += emit_sub (p, x0, x1, register_operand (x0));
2437
2438 emit_ops_insns (buf, p - buf);
2439}
2440
2441/* Implementation of emit_ops method "emit_mul". */
2442
2443static void
2444aarch64_emit_mul (void)
2445{
2446 uint32_t buf[16];
2447 uint32_t *p = buf;
2448
2449 p += emit_pop (p, x1);
2450 p += emit_mul (p, x0, x1, x0);
2451
2452 emit_ops_insns (buf, p - buf);
2453}
2454
2455/* Implementation of emit_ops method "emit_lsh". */
2456
2457static void
2458aarch64_emit_lsh (void)
2459{
2460 uint32_t buf[16];
2461 uint32_t *p = buf;
2462
2463 p += emit_pop (p, x1);
2464 p += emit_lslv (p, x0, x1, x0);
2465
2466 emit_ops_insns (buf, p - buf);
2467}
2468
2469/* Implementation of emit_ops method "emit_rsh_signed". */
2470
2471static void
2472aarch64_emit_rsh_signed (void)
2473{
2474 uint32_t buf[16];
2475 uint32_t *p = buf;
2476
2477 p += emit_pop (p, x1);
2478 p += emit_asrv (p, x0, x1, x0);
2479
2480 emit_ops_insns (buf, p - buf);
2481}
2482
2483/* Implementation of emit_ops method "emit_rsh_unsigned". */
2484
2485static void
2486aarch64_emit_rsh_unsigned (void)
2487{
2488 uint32_t buf[16];
2489 uint32_t *p = buf;
2490
2491 p += emit_pop (p, x1);
2492 p += emit_lsrv (p, x0, x1, x0);
2493
2494 emit_ops_insns (buf, p - buf);
2495}
2496
2497/* Implementation of emit_ops method "emit_ext". */
2498
2499static void
2500aarch64_emit_ext (int arg)
2501{
2502 uint32_t buf[16];
2503 uint32_t *p = buf;
2504
2505 p += emit_sbfx (p, x0, x0, 0, arg);
2506
2507 emit_ops_insns (buf, p - buf);
2508}
2509
2510/* Implementation of emit_ops method "emit_log_not". */
2511
2512static void
2513aarch64_emit_log_not (void)
2514{
2515 uint32_t buf[16];
2516 uint32_t *p = buf;
2517
2518 /* If the top of the stack is 0, replace it with 1. Else replace it with
2519 0. */
2520
2521 p += emit_cmp (p, x0, immediate_operand (0));
2522 p += emit_cset (p, x0, EQ);
2523
2524 emit_ops_insns (buf, p - buf);
2525}
2526
2527/* Implementation of emit_ops method "emit_bit_and". */
2528
2529static void
2530aarch64_emit_bit_and (void)
2531{
2532 uint32_t buf[16];
2533 uint32_t *p = buf;
2534
2535 p += emit_pop (p, x1);
2536 p += emit_and (p, x0, x0, x1);
2537
2538 emit_ops_insns (buf, p - buf);
2539}
2540
2541/* Implementation of emit_ops method "emit_bit_or". */
2542
2543static void
2544aarch64_emit_bit_or (void)
2545{
2546 uint32_t buf[16];
2547 uint32_t *p = buf;
2548
2549 p += emit_pop (p, x1);
2550 p += emit_orr (p, x0, x0, x1);
2551
2552 emit_ops_insns (buf, p - buf);
2553}
2554
2555/* Implementation of emit_ops method "emit_bit_xor". */
2556
2557static void
2558aarch64_emit_bit_xor (void)
2559{
2560 uint32_t buf[16];
2561 uint32_t *p = buf;
2562
2563 p += emit_pop (p, x1);
2564 p += emit_eor (p, x0, x0, x1);
2565
2566 emit_ops_insns (buf, p - buf);
2567}
2568
2569/* Implementation of emit_ops method "emit_bit_not". */
2570
2571static void
2572aarch64_emit_bit_not (void)
2573{
2574 uint32_t buf[16];
2575 uint32_t *p = buf;
2576
2577 p += emit_mvn (p, x0, x0);
2578
2579 emit_ops_insns (buf, p - buf);
2580}
2581
2582/* Implementation of emit_ops method "emit_equal". */
2583
2584static void
2585aarch64_emit_equal (void)
2586{
2587 uint32_t buf[16];
2588 uint32_t *p = buf;
2589
2590 p += emit_pop (p, x1);
2591 p += emit_cmp (p, x0, register_operand (x1));
2592 p += emit_cset (p, x0, EQ);
2593
2594 emit_ops_insns (buf, p - buf);
2595}
2596
2597/* Implementation of emit_ops method "emit_less_signed". */
2598
2599static void
2600aarch64_emit_less_signed (void)
2601{
2602 uint32_t buf[16];
2603 uint32_t *p = buf;
2604
2605 p += emit_pop (p, x1);
2606 p += emit_cmp (p, x1, register_operand (x0));
2607 p += emit_cset (p, x0, LT);
2608
2609 emit_ops_insns (buf, p - buf);
2610}
2611
2612/* Implementation of emit_ops method "emit_less_unsigned". */
2613
2614static void
2615aarch64_emit_less_unsigned (void)
2616{
2617 uint32_t buf[16];
2618 uint32_t *p = buf;
2619
2620 p += emit_pop (p, x1);
2621 p += emit_cmp (p, x1, register_operand (x0));
2622 p += emit_cset (p, x0, LO);
2623
2624 emit_ops_insns (buf, p - buf);
2625}
2626
2627/* Implementation of emit_ops method "emit_ref". */
2628
2629static void
2630aarch64_emit_ref (int size)
2631{
2632 uint32_t buf[16];
2633 uint32_t *p = buf;
2634
2635 switch (size)
2636 {
2637 case 1:
2638 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2639 break;
2640 case 2:
2641 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2642 break;
2643 case 4:
2644 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2645 break;
2646 case 8:
2647 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2648 break;
2649 default:
2650 /* Unknown size, bail on compilation. */
2651 emit_error = 1;
2652 break;
2653 }
2654
2655 emit_ops_insns (buf, p - buf);
2656}
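
/* Note that x0, the cached top of the stack, is both input and output
   here: the address it holds is replaced by the value loaded from
   that address.  */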
2657
2658/* Implementation of emit_ops method "emit_if_goto". */
2659
2660static void
2661aarch64_emit_if_goto (int *offset_p, int *size_p)
2662{
2663 uint32_t buf[16];
2664 uint32_t *p = buf;
2665
2666 /* The Z flag is set or cleared here. */
2667 p += emit_cmp (p, x0, immediate_operand (0));
2668 /* This instruction must not change the Z flag. */
2669 p += emit_pop (p, x0);
2670 /* Branch over the next instruction if x0 == 0. */
2671 p += emit_bcond (p, EQ, 8);
2672
2673 /* The NOP instruction will be patched with an unconditional branch. */
2674 if (offset_p)
2675 *offset_p = (p - buf) * 4;
2676 if (size_p)
2677 *size_p = 4;
2678 p += emit_nop (p);
2679
2680 emit_ops_insns (buf, p - buf);
2681}
2682
2683/* Implementation of emit_ops method "emit_goto". */
2684
2685static void
2686aarch64_emit_goto (int *offset_p, int *size_p)
2687{
2688 uint32_t buf[16];
2689 uint32_t *p = buf;
2690
2691 /* The NOP instruction will be patched with an unconditional branch. */
2692 if (offset_p)
2693 *offset_p = 0;
2694 if (size_p)
2695 *size_p = 4;
2696 p += emit_nop (p);
2697
2698 emit_ops_insns (buf, p - buf);
2699}
2700
2701/* Implementation of emit_ops method "write_goto_address". */
2702
bb1183e2 2703static void
2704aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2705{
2706 uint32_t insn;
2707
2708 emit_b (&insn, 0, to - from);
2709 append_insns (&from, 1, &insn);
2710}
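
/* Presumed flow, as a sketch: the bytecode compiler records the
   address of the NOP placeholder through the OFFSET_P/SIZE_P outputs
   above, and once the branch target is known calls this function,
   which overwrites the NOP in inferior memory with "B #(to - from)".  */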
2711
2712/* Implementation of emit_ops method "emit_const". */
2713
2714static void
2715aarch64_emit_const (LONGEST num)
2716{
2717 uint32_t buf[16];
2718 uint32_t *p = buf;
2719
2720 p += emit_mov_addr (p, x0, num);
2721
2722 emit_ops_insns (buf, p - buf);
2723}
2724
2725/* Implementation of emit_ops method "emit_call". */
2726
2727static void
2728aarch64_emit_call (CORE_ADDR fn)
2729{
2730 uint32_t buf[16];
2731 uint32_t *p = buf;
2732
2733 p += emit_mov_addr (p, ip0, fn);
2734 p += emit_blr (p, ip0);
2735
2736 emit_ops_insns (buf, p - buf);
2737}
2738
2739/* Implementation of emit_ops method "emit_reg". */
2740
2741static void
2742aarch64_emit_reg (int reg)
2743{
2744 uint32_t buf[16];
2745 uint32_t *p = buf;
2746
2747 /* Set x0 to unsigned char *regs. */
2748 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2749 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2750 p += emit_mov (p, x1, immediate_operand (reg));
2751
2752 emit_ops_insns (buf, p - buf);
2753
2754 aarch64_emit_call (get_raw_reg_func_addr ());
2755}
2756
2757/* Implementation of emit_ops method "emit_pop". */
2758
2759static void
2760aarch64_emit_pop (void)
2761{
2762 uint32_t buf[16];
2763 uint32_t *p = buf;
2764
2765 p += emit_pop (p, x0);
2766
2767 emit_ops_insns (buf, p - buf);
2768}
2769
2770/* Implementation of emit_ops method "emit_stack_flush". */
2771
2772static void
2773aarch64_emit_stack_flush (void)
2774{
2775 uint32_t buf[16];
2776 uint32_t *p = buf;
2777
2778 p += emit_push (p, x0);
2779
2780 emit_ops_insns (buf, p - buf);
2781}
2782
2783/* Implementation of emit_ops method "emit_zero_ext". */
2784
2785static void
2786aarch64_emit_zero_ext (int arg)
2787{
2788 uint32_t buf[16];
2789 uint32_t *p = buf;
2790
2791 p += emit_ubfx (p, x0, x0, 0, arg);
2792
2793 emit_ops_insns (buf, p - buf);
2794}
2795
2796/* Implementation of emit_ops method "emit_swap". */
2797
2798static void
2799aarch64_emit_swap (void)
2800{
2801 uint32_t buf[16];
2802 uint32_t *p = buf;
2803
2804 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2805 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2806 p += emit_mov (p, x0, register_operand (x1));
2807
2808 emit_ops_insns (buf, p - buf);
2809}
2810
2811/* Implementation of emit_ops method "emit_stack_adjust". */
2812
2813static void
2814aarch64_emit_stack_adjust (int n)
2815{
2816 /* This is not needed with our design. */
2817 uint32_t buf[16];
2818 uint32_t *p = buf;
2819
2820 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2821
2822 emit_ops_insns (buf, p - buf);
2823}
2824
2825/* Implementation of emit_ops method "emit_int_call_1". */
2826
2827static void
2828aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2829{
2830 uint32_t buf[16];
2831 uint32_t *p = buf;
2832
2833 p += emit_mov (p, x0, immediate_operand (arg1));
2834
2835 emit_ops_insns (buf, p - buf);
2836
2837 aarch64_emit_call (fn);
2838}
2839
2840/* Implementation of emit_ops method "emit_void_call_2". */
2841
2842static void
2843aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2844{
2845 uint32_t buf[16];
2846 uint32_t *p = buf;
2847
2848 /* Push x0 on the stack. */
2849 aarch64_emit_stack_flush ();
2850
2851 /* Setup arguments for the function call:
2852
2853 x0: arg1
2854 x1: top of the stack
2855
2856 MOV x1, x0
2857 MOV x0, #arg1 */
2858
2859 p += emit_mov (p, x1, register_operand (x0));
2860 p += emit_mov (p, x0, immediate_operand (arg1));
2861
2862 emit_ops_insns (buf, p - buf);
2863
2864 aarch64_emit_call (fn);
2865
2866 /* Restore x0. */
2867 aarch64_emit_pop ();
2868}
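
/* The flush/pop pair exists because the call clobbers x0, the cached
   top of the stack, which must survive a void function call.  */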
2869
2870/* Implementation of emit_ops method "emit_eq_goto". */
2871
2872static void
2873aarch64_emit_eq_goto (int *offset_p, int *size_p)
2874{
2875 uint32_t buf[16];
2876 uint32_t *p = buf;
2877
2878 p += emit_pop (p, x1);
2879 p += emit_cmp (p, x1, register_operand (x0));
2880 /* Branch over the next instruction if x0 != x1. */
2881 p += emit_bcond (p, NE, 8);
2882 /* The NOP instruction will be patched with an unconditional branch. */
2883 if (offset_p)
2884 *offset_p = (p - buf) * 4;
2885 if (size_p)
2886 *size_p = 4;
2887 p += emit_nop (p);
2888
2889 emit_ops_insns (buf, p - buf);
2890}
2891
2892/* Implementation of emit_ops method "emit_ne_goto". */
2893
2894static void
2895aarch64_emit_ne_goto (int *offset_p, int *size_p)
2896{
2897 uint32_t buf[16];
2898 uint32_t *p = buf;
2899
2900 p += emit_pop (p, x1);
2901 p += emit_cmp (p, x1, register_operand (x0));
2902 /* Branch over the next instruction if x0 == x1. */
2903 p += emit_bcond (p, EQ, 8);
2904 /* The NOP instruction will be patched with an unconditional branch. */
2905 if (offset_p)
2906 *offset_p = (p - buf) * 4;
2907 if (size_p)
2908 *size_p = 4;
2909 p += emit_nop (p);
2910
2911 emit_ops_insns (buf, p - buf);
2912}
2913
2914/* Implementation of emit_ops method "emit_lt_goto". */
2915
2916static void
2917aarch64_emit_lt_goto (int *offset_p, int *size_p)
2918{
2919 uint32_t buf[16];
2920 uint32_t *p = buf;
2921
2922 p += emit_pop (p, x1);
2923 p += emit_cmp (p, x1, register_operand (x0));
2924 /* Branch over the next instruction if x0 >= x1. */
2925 p += emit_bcond (p, GE, 8);
2926 /* The NOP instruction will be patched with an unconditional branch. */
2927 if (offset_p)
2928 *offset_p = (p - buf) * 4;
2929 if (size_p)
2930 *size_p = 4;
2931 p += emit_nop (p);
2932
2933 emit_ops_insns (buf, p - buf);
2934}
2935
2936/* Implementation of emit_ops method "emit_le_goto". */
2937
2938static void
2939aarch64_emit_le_goto (int *offset_p, int *size_p)
2940{
2941 uint32_t buf[16];
2942 uint32_t *p = buf;
2943
2944 p += emit_pop (p, x1);
2945 p += emit_cmp (p, x1, register_operand (x0));
2946 /* Branch over the next instruction if x0 > x1. */
2947 p += emit_bcond (p, GT, 8);
2948 /* The NOP instruction will be patched with an unconditional branch. */
2949 if (offset_p)
2950 *offset_p = (p - buf) * 4;
2951 if (size_p)
2952 *size_p = 4;
2953 p += emit_nop (p);
2954
2955 emit_ops_insns (buf, p - buf);
2956}
2957
2958/* Implementation of emit_ops method "emit_gt_goto". */
2959
2960static void
2961aarch64_emit_gt_goto (int *offset_p, int *size_p)
2962{
2963 uint32_t buf[16];
2964 uint32_t *p = buf;
2965
2966 p += emit_pop (p, x1);
2967 p += emit_cmp (p, x1, register_operand (x0));
2968 /* Branch over the next instruction if x0 <= x1. */
2969 p += emit_bcond (p, LE, 8);
2970 /* The NOP instruction will be patched with an unconditional branch. */
2971 if (offset_p)
2972 *offset_p = (p - buf) * 4;
2973 if (size_p)
2974 *size_p = 4;
2975 p += emit_nop (p);
2976
2977 emit_ops_insns (buf, p - buf);
2978}
2979
2980/* Implementation of emit_ops method "emit_ge_goto".  */
2981
2982static void
2983aarch64_emit_ge_got (int *offset_p, int *size_p)
2984{
2985 uint32_t buf[16];
2986 uint32_t *p = buf;
2987
2988 p += emit_pop (p, x1);
2989 p += emit_cmp (p, x1, register_operand (x0));
 2990	  /* Branch over the next instruction if x0 < x1.  */
2991 p += emit_bcond (p, LT, 8);
2992 /* The NOP instruction will be patched with an unconditional branch. */
2993 if (offset_p)
2994 *offset_p = (p - buf) * 4;
2995 if (size_p)
2996 *size_p = 4;
2997 p += emit_nop (p);
2998
2999 emit_ops_insns (buf, p - buf);
3000}
3001
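/* A hypothetical driver (not part of this file) showing how the
   emit_ops above compose.  Compiling the agent-expression bytecode
   "reg 0; const 8; add" would go roughly as follows, leaving the
   result in x0, the cached top of the stack.  */

static void
example_compile_reg0_plus_8 (void)
{
  aarch64_emit_reg (0);		/* x0 = value of raw register 0.  */
  aarch64_emit_stack_flush ();	/* Push x0 to make room for a new top.  */
  aarch64_emit_const (8);	/* x0 = 8.  */
  aarch64_emit_add ();		/* x0 = pop () + x0, i.e. reg0 + 8.  */
}
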
3002static struct emit_ops aarch64_emit_ops_impl =
3003{
3004 aarch64_emit_prologue,
3005 aarch64_emit_epilogue,
3006 aarch64_emit_add,
3007 aarch64_emit_sub,
3008 aarch64_emit_mul,
3009 aarch64_emit_lsh,
3010 aarch64_emit_rsh_signed,
3011 aarch64_emit_rsh_unsigned,
3012 aarch64_emit_ext,
3013 aarch64_emit_log_not,
3014 aarch64_emit_bit_and,
3015 aarch64_emit_bit_or,
3016 aarch64_emit_bit_xor,
3017 aarch64_emit_bit_not,
3018 aarch64_emit_equal,
3019 aarch64_emit_less_signed,
3020 aarch64_emit_less_unsigned,
3021 aarch64_emit_ref,
3022 aarch64_emit_if_goto,
3023 aarch64_emit_goto,
3024 aarch64_write_goto_address,
3025 aarch64_emit_const,
3026 aarch64_emit_call,
3027 aarch64_emit_reg,
3028 aarch64_emit_pop,
3029 aarch64_emit_stack_flush,
3030 aarch64_emit_zero_ext,
3031 aarch64_emit_swap,
3032 aarch64_emit_stack_adjust,
3033 aarch64_emit_int_call_1,
3034 aarch64_emit_void_call_2,
3035 aarch64_emit_eq_goto,
3036 aarch64_emit_ne_goto,
3037 aarch64_emit_lt_goto,
3038 aarch64_emit_le_goto,
3039 aarch64_emit_gt_goto,
3040 aarch64_emit_ge_got,
3041};
3042
3043/* Implementation of linux_target_ops method "emit_ops". */
3044
3045static struct emit_ops *
3046aarch64_emit_ops (void)
3047{
3048 return &aarch64_emit_ops_impl;
3049}
3050
3051/* Implementation of linux_target_ops method
3052 "get_min_fast_tracepoint_insn_len". */
3053
3054static int
3055aarch64_get_min_fast_tracepoint_insn_len (void)
3056{
3057 return 4;
3058}
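
/* The value is 4 because every AArch64 instruction, including the B
   that replaces the original instruction at the tracepoint, is exactly
   one 4-byte word.  */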
3059
3060/* Implementation of linux_target_ops method "supports_range_stepping". */
3061
3062static int
3063aarch64_supports_range_stepping (void)
3064{
3065 return 1;
3066}
3067
3ca4edb6 3068/* Implementation of target ops method "sw_breakpoint_from_kind". */
dd373349 3069
3070const gdb_byte *
3071aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
dd373349 3072{
3073 if (is_64bit_tdesc ())
3074 {
3075 *size = aarch64_breakpoint_len;
3076 return aarch64_breakpoint;
3077 }
3078 else
3079 return arm_sw_breakpoint_from_kind (kind, size);
3080}
3081
06250e4e 3082/* Implementation of target ops method "breakpoint_kind_from_pc". */
17b1509a 3083
3084int
3085aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3086{
3087 if (is_64bit_tdesc ())
3088 return aarch64_breakpoint_len;
3089 else
3090 return arm_breakpoint_kind_from_pc (pcptr);
3091}
3092
06250e4e 3093/* Implementation of the target ops method
3094 "breakpoint_kind_from_current_state". */
3095
3096int
3097aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3098{
3099 if (is_64bit_tdesc ())
3100 return aarch64_breakpoint_len;
3101 else
3102 return arm_breakpoint_kind_from_current_state (pcptr);
3103}
3104
3105/* Support for hardware single step. */
3106
3107static int
3108aarch64_supports_hardware_single_step (void)
3109{
3110 return 1;
3111}
3112
3113struct linux_target_ops the_low_target =
3114{
3115 aarch64_stopped_by_watchpoint,
3116 aarch64_stopped_data_address,
3117 NULL, /* collect_ptrace_register */
3118 NULL, /* supply_ptrace_register */
ade90bde 3119 aarch64_linux_siginfo_fixup,
176eb98c 3120 aarch64_linux_new_process,
04ec7890 3121 aarch64_linux_delete_process,
176eb98c 3122 aarch64_linux_new_thread,
466eecee 3123 aarch64_linux_delete_thread,
3a8a0396 3124 aarch64_linux_new_fork,
176eb98c 3125 aarch64_linux_prepare_to_resume,
421530db 3126 NULL, /* process_qsupported */
7671bf47 3127 aarch64_supports_tracepoints,
3128 aarch64_get_thread_area,
3129 aarch64_install_fast_tracepoint_jump_pad,
afbe19f8 3130 aarch64_emit_ops,
bb903df0 3131 aarch64_get_min_fast_tracepoint_insn_len,
d1d0aea1 3132 aarch64_supports_range_stepping,
7d00775e 3133 aarch64_supports_hardware_single_step,
061fc021 3134 aarch64_get_syscall_trapinfo,
176eb98c 3135};
3aee8918 3136
3137/* The linux target ops object. */
3138
3139linux_process_target *the_linux_target = &the_aarch64_target;
3140
3141void
3142initialize_low_arch (void)
3143{
3144 initialize_low_arch_aarch32 ();
3145
3aee8918 3146 initialize_regsets_info (&aarch64_regsets_info);
02895270 3147 initialize_regsets_info (&aarch64_sve_regsets_info);
3aee8918 3148}