gdbserver/linux-low: turn 'siginfo_fixup' into a method
gdbserver/linux-aarch64-low.cc

/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description in the regcache contains the
   SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
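
/* (These bytes are the little-endian encoding of 0xd4200000, i.e. BRK #0;
   instructions are always stored little-endian on AArch64.)  */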

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */, state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what memory range was
	     watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
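
/* (As the conversions above show, a DIRECTION of 0 translates the native
   siginfo_t into the 32-bit inferior's compat layout in INF, while any
   other value translates the compat layout in INF back into the native
   siginfo_t.)  */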

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
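
/* (The NT_ARM_TLS regset read above holds TPIDR_EL0, the software thread
   ID register listed among the system control registers further down.)  */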

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
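
/* A worked example of the encoding above: TPIDR_EL0 is S3_3_C13_C0_2
   (op0=3, op1=3, CRn=13, CRm=0, op2=2).  Only the low bit of op0 is
   encoded (bit 14), so the packed value is
   (1 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 == 0x5e82.  */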

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
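
/* Note that the pair immediate above is encoded pre-scaled
   (operand.index >> 3, i.e. divided by 8), which is what gives the
   -512 .. 504 byte range documented for emit_stp and emit_ldp below.  */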

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
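
/* A worked example: for ADDR == 0x0000ffffb7ff0000 the sequence emitted
   above is

     MOV  xd, #0x0000
     MOVK xd, #0xb7ff, lsl #16
     MOVK xd, #0xffff, lsl #32

   and emission stops there because bits 48..63 of ADDR are zero.  */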

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes rn to rd if the condition is true, otherwise
   it writes rm incremented by one.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination
   register; 1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note that instructions are always little-endian on AArch64, unlike
   data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
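
/* A minimal usage sketch (SCRATCH_ADDR is a hypothetical target address).
   The emit_* helpers return the number of 32-bit words they wrote, so a
   caller accumulates a sequence and flushes it in one call, which also
   advances TO past the written code:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, SCRATCH_ADDR);
     p += emit_blr (p, x0);
     append_insns (&to, p - buf, buf);  */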

/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation during fast tracepoint insertion.  A visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
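
/* In the visitors above and below, the original branch target is
   BASE.INSN_ADDR + OFFSET; seen from the relocated copy at NEW_ADDR,
   that same target lies at displacement (insn_addr - new_addr) + offset,
   which is the NEW_OFFSET each visitor computes before checking that it
   still fits in the branch instruction's immediate field.  */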
1733/* Implementation of aarch64_insn_visitor method "b_cond". */
1734
1735static void
1736aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1737 struct aarch64_insn_data *data)
1738{
1739 struct aarch64_insn_relocation_data *insn_reloc
1740 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1741 int64_t new_offset
0badd99f
YQ
1742 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1743
1744 if (can_encode_int32 (new_offset, 21))
1745 {
1746 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1747 new_offset);
bb903df0 1748 }
0badd99f 1749 else if (can_encode_int32 (new_offset, 28))
bb903df0 1750 {
0badd99f
YQ
1751 /* The offset is out of range for a conditional branch
1752 instruction but not for a unconditional branch. We can use
1753 the following instructions instead:
bb903df0 1754
0badd99f
YQ
1755 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1756 B NOT_TAKEN ; Else jump over TAKEN and continue.
1757 TAKEN:
1758 B #(offset - 8)
1759 NOT_TAKEN:
1760
1761 */
1762
1763 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1764 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1765 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1766 }
0badd99f 1767}
bb903df0 1768
0badd99f
YQ
1769/* Implementation of aarch64_insn_visitor method "cb". */
1770
1771static void
1772aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1773 const unsigned rn, int is64,
1774 struct aarch64_insn_data *data)
1775{
1776 struct aarch64_insn_relocation_data *insn_reloc
1777 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1778 int64_t new_offset
0badd99f
YQ
1779 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1780
1781 if (can_encode_int32 (new_offset, 21))
1782 {
1783 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1784 aarch64_register (rn, is64), new_offset);
bb903df0 1785 }
0badd99f 1786 else if (can_encode_int32 (new_offset, 28))
bb903df0 1787 {
0badd99f
YQ
1788 /* The offset is out of range for a compare and branch
1789 instruction but not for a unconditional branch. We can use
1790 the following instructions instead:
1791
1792 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1793 B NOT_TAKEN ; Else jump over TAKEN and continue.
1794 TAKEN:
1795 B #(offset - 8)
1796 NOT_TAKEN:
1797
1798 */
1799 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1800 aarch64_register (rn, is64), 8);
1801 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1802 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1803 }
1804}
bb903df0 1805
0badd99f 1806/* Implementation of aarch64_insn_visitor method "tb". */
bb903df0 1807
0badd99f
YQ
1808static void
1809aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1810 const unsigned rt, unsigned bit,
1811 struct aarch64_insn_data *data)
1812{
1813 struct aarch64_insn_relocation_data *insn_reloc
1814 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1815 int64_t new_offset
1816 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1817
1818 if (can_encode_int32 (new_offset, 16))
1819 {
1820 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1821 aarch64_register (rt, 1), new_offset);
bb903df0 1822 }
0badd99f 1823 else if (can_encode_int32 (new_offset, 28))
bb903df0 1824 {
1825 /* The offset is out of range for a test bit and branch
1826 instruction but not for an unconditional branch. We can use
1827 the following instructions instead:
1828
1829 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
1830 B NOT_TAKEN ; Else jump over TAKEN and continue.
1831 TAKEN:
1832 B #(offset - 8)
1833 NOT_TAKEN:
1834
1835 */
1836 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1837 aarch64_register (rt, 1), 8);
1838 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1839 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1840 new_offset - 8);
1841 }
1842}
bb903df0 1843
0badd99f 1844/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1845
1846static void
1847aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1848 const int is_adrp,
1849 struct aarch64_insn_data *data)
1850{
1851 struct aarch64_insn_relocation_data *insn_reloc
1852 = (struct aarch64_insn_relocation_data *) data;
1853 /* We know exactly the address the ADR{P,} instruction will compute.
1854 We can just write it to the destination register. */
1855 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1856
1857 if (is_adrp)
1858 {
1859 /* Clear the lower 12 bits of the address to get its 4K page. */
1860 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1861 aarch64_register (rd, 1),
1862 address & ~0xfff);
1863 }
1864 else
1865 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1866 aarch64_register (rd, 1), address);
1867}
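/* Editor's illustration (added; not part of the original file): the
   page computation used above, as a standalone helper.  ADRP yields
   the 4 KiB page of a PC-relative target; the low 12 bits are applied
   separately, e.g. by a following ADD or a load/store immediate.  */

static CORE_ADDR
adrp_page_sketch (CORE_ADDR target)
{
  /* E.g. 0xaaaade543210 becomes 0xaaaade543000.  */
  return target & ~(CORE_ADDR) 0xfff;
}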
bb903df0 1868
0badd99f 1869/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1870
1871static void
1872aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1873 const unsigned rt, const int is64,
1874 struct aarch64_insn_data *data)
1875{
1876 struct aarch64_insn_relocation_data *insn_reloc
1877 = (struct aarch64_insn_relocation_data *) data;
1878 CORE_ADDR address = data->insn_addr + offset;
1879
1880 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1881 aarch64_register (rt, 1), address);
1882
1883 /* We know exactly what address to load from, and what register we
1884 can use:
1885
1886 MOV xd, #(oldloc + offset)
1887 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1888 ...
1889
1890 LDR xd, [xd] ; or LDRSW xd, [xd]
1891
1892 */
1893
1894 if (is_sw)
1895 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1896 aarch64_register (rt, 1),
1897 aarch64_register (rt, 1),
1898 offset_memory_operand (0));
bb903df0 1899 else
1900 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1901 aarch64_register (rt, is64),
1902 aarch64_register (rt, 1),
1903 offset_memory_operand (0));
1904}
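/* Editor's sketch (added): emit_mov_addr is assumed to materialize a
   64-bit value with one MOVZ followed by up to three MOVKs, one 16-bit
   chunk per instruction; the "..." in the comment above stands for
   those MOVKs.  The chunking itself, as a standalone illustration
   (hypothetical helper, not the real emitter):  */

static void
mov_addr_chunks_sketch (CORE_ADDR addr, uint16_t chunks[4])
{
  for (int i = 0; i < 4; i++)
    /* Chunk 0 seeds the register via MOVZ; chunk I is then inserted
       with MOVK ..., LSL #(16 * I).  */
    chunks[i] = (addr >> (16 * i)) & 0xffff;
}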
1905
1906/* Implementation of aarch64_insn_visitor method "others". */
1907
1908static void
1909aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1910 struct aarch64_insn_data *data)
1911{
1912 struct aarch64_insn_relocation_data *insn_reloc
1913 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1914
1915 /* The instruction is not PC relative. Just re-emit it at the new
1916 location. */
e1c587c3 1917 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1918}
1919
1920static const struct aarch64_insn_visitor visitor =
1921{
1922 aarch64_ftrace_insn_reloc_b,
1923 aarch64_ftrace_insn_reloc_b_cond,
1924 aarch64_ftrace_insn_reloc_cb,
1925 aarch64_ftrace_insn_reloc_tb,
1926 aarch64_ftrace_insn_reloc_adr,
1927 aarch64_ftrace_insn_reloc_ldr_literal,
1928 aarch64_ftrace_insn_reloc_others,
1929};
1930
1931/* Implementation of linux_target_ops method
1932 "install_fast_tracepoint_jump_pad". */
1933
1934static int
1935aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1936 CORE_ADDR tpaddr,
1937 CORE_ADDR collector,
1938 CORE_ADDR lockaddr,
1939 ULONGEST orig_size,
1940 CORE_ADDR *jump_entry,
1941 CORE_ADDR *trampoline,
1942 ULONGEST *trampoline_size,
1943 unsigned char *jjump_pad_insn,
1944 ULONGEST *jjump_pad_insn_size,
1945 CORE_ADDR *adjusted_insn_addr,
1946 CORE_ADDR *adjusted_insn_addr_end,
1947 char *err)
1948{
1949 uint32_t buf[256];
1950 uint32_t *p = buf;
2ac09a5b 1951 int64_t offset;
bb903df0 1952 int i;
70b439f0 1953 uint32_t insn;
bb903df0 1954 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1955 struct aarch64_insn_relocation_data insn_data;
1956
1957 /* We need to save the current state on the stack both to restore it
1958 later and to collect register values when the tracepoint is hit.
1959
1960 The saved registers are pushed in a layout that needs to be in sync
1961 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1962 the supply_fast_tracepoint_registers function will fill in the
1963 register cache from a pointer to saved registers on the stack we build
1964 here.
1965
1966 For simplicity, we set the size of each cell on the stack to 16 bytes.
1967 This way one cell can hold any register type, from system registers
1968 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
1969 has to be 16-byte aligned anyway.
1970
1971 Note that the CPSR register does not exist on AArch64. Instead we
1972 can access system bits describing the process state with the
1973 MRS/MSR instructions, namely the condition flags. We save them as
1974 if they are part of a CPSR register because that's how GDB
1975 interprets these system bits. At the moment, only the condition
1976 flags are saved in CPSR (NZCV).
1977
1978 Stack layout, each cell is 16 bytes (descending):
1979
1980 High *-------- SIMD&FP registers from 31 down to 0. --------*
1981 | q31 |
1982 . .
1983 . . 32 cells
1984 . .
1985 | q0 |
1986 *---- General purpose registers from 30 down to 0. ----*
1987 | x30 |
1988 . .
1989 . . 31 cells
1990 . .
1991 | x0 |
1992 *------------- Special purpose registers. -------------*
1993 | SP |
1994 | PC |
1995 | CPSR (NZCV) | 5 cells
1996 | FPSR |
1997 | FPCR | <- SP + 16
1998 *------------- collecting_t object --------------------*
1999 | TPIDR_EL0 | struct tracepoint * |
2000 Low *------------------------------------------------------*
2001
2002 After this stack is set up, we issue a call to the collector, passing
2003 it the saved registers at (SP + 16). */
2004
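  /* Editor's sketch (added): the finished frame, written out as a C
     struct for reference, lowest address first.  "cell" stands for the
     16-byte unit described above; all names here are illustrative only.

	typedef struct { uint64_t lo, hi; } cell;

	struct jump_pad_frame
	{
	  cell collecting_obj;	   at SP: tpoint + TPIDR_EL0
	  cell fpcr;		   at SP + 16: first saved register
	  cell fpsr;
	  cell cpsr;		   NZCV flags
	  cell pc;		   tpaddr
	  cell sp_save;
	  cell x[31];		   x0 .. x30
	  cell q[32];		   q0 .. q31
	};  */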
2005 /* Push SIMD&FP registers on the stack:
2006
2007 SUB sp, sp, #(32 * 16)
2008
2009 STP q30, q31, [sp, #(30 * 16)]
2010 ...
2011 STP q0, q1, [sp]
2012
2013 */
2014 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2015 for (i = 30; i >= 0; i -= 2)
2016 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2017
30baf67b 2018 /* Push general purpose registers on the stack. Note that we do not need
2019 to push x31 as it represents the xzr register and not the stack
2020 pointer in a STR instruction.
2021
2022 SUB sp, sp, #(31 * 16)
2023
2024 STR x30, [sp, #(30 * 16)]
2025 ...
2026 STR x0, [sp]
2027
2028 */
2029 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2030 for (i = 30; i >= 0; i -= 1)
2031 p += emit_str (p, aarch64_register (i, 1), sp,
2032 offset_memory_operand (i * 16));
2033
2034 /* Make space for 5 more cells.
2035
2036 SUB sp, sp, #(5 * 16)
2037
2038 */
2039 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2040
2041
2042 /* Save SP:
2043
2044 ADD x4, sp, #((32 + 31 + 5) * 16)
2045 STR x4, [sp, #(4 * 16)]
2046
2047 */
2048 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2049 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2050
2051 /* Save PC (tracepoint address):
2052
2053 MOV x3, #(tpaddr)
2054 ...
2055
2056 STR x3, [sp, #(3 * 16)]
2057
2058 */
2059
2060 p += emit_mov_addr (p, x3, tpaddr);
2061 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2062
2063 /* Save CPSR (NZCV), FPSR and FPCR:
2064
2065 MRS x2, nzcv
2066 MRS x1, fpsr
2067 MRS x0, fpcr
2068
2069 STR x2, [sp, #(2 * 16)]
2070 STR x1, [sp, #(1 * 16)]
2071 STR x0, [sp, #(0 * 16)]
2072
2073 */
2074 p += emit_mrs (p, x2, NZCV);
2075 p += emit_mrs (p, x1, FPSR);
2076 p += emit_mrs (p, x0, FPCR);
2077 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2078 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2079 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2080
2081 /* Push the collecting_t object. It consists of the address of the
2082 tracepoint and an ID for the current thread. We get the latter by
2083 reading the tpidr_el0 system register. It corresponds to the
2084 NT_ARM_TLS register accessible with ptrace.
2085
2086 MOV x0, #(tpoint)
2087 ...
2088
2089 MRS x1, tpidr_el0
2090
2091 STP x0, x1, [sp, #-16]!
2092
2093 */
2094
2095 p += emit_mov_addr (p, x0, tpoint);
2096 p += emit_mrs (p, x1, TPIDR_EL0);
2097 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2098
2099 /* Spin-lock:
2100
2101 The shared memory for the lock is at lockaddr. It will hold zero
2102 if no-one is holding the lock, otherwise it contains the address of
2103 the collecting_t object on the stack of the thread which acquired it.
2104
2105 At this stage, the stack pointer points to this thread's collecting_t
2106 object.
2107
2108 We use the following registers:
2109 - x0: Address of the lock.
2110 - x1: Pointer to collecting_t object.
2111 - x2: Scratch register.
2112
2113 MOV x0, #(lockaddr)
2114 ...
2115 MOV x1, sp
2116
2117 ; Trigger an event local to this core, so that the first WFE
2118 ; below completes immediately instead of waiting.
2119 SEVL
2120 again:
2121 ; Wait for an event. The event is triggered by either the SEVL
2122 ; or STLR instructions (store release).
2123 WFE
2124
2125 ; Atomically read at lockaddr. This marks the memory location as
2126 ; exclusive. This instruction also has memory constraints which
2127 ; make sure all previous data reads and writes are done before
2128 ; executing it.
2129 LDAXR x2, [x0]
2130
2131 ; Try again if another thread holds the lock.
2132 CBNZ x2, again
2133
2134 ; We can lock it! Write the address of the collecting_t object.
2135 ; This instruction will fail if the memory location is not marked
2136 ; as exclusive anymore. If it succeeds, it will remove the
2137 ; exclusive mark on the memory location. This way, if another
2138 ; thread executes this instruction before us, we will fail and try
2139 ; all over again.
2140 STXR w2, x1, [x0]
2141 CBNZ w2, again
2142
2143 */
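  /* Editor's sketch (added): a rough C11 analogue of the LDAXR/STXR
     loop above, for reference only; the jump pad is raw machine code,
     and SEVL/WFE have no direct C equivalent:

	#include <stdatomic.h>
	#include <stdint.h>

	static void
	acquire_sketch (_Atomic uintptr_t *lock, uintptr_t me)
	{
	  uintptr_t expected = 0;
	  while (!atomic_compare_exchange_weak_explicit
		   (lock, &expected, me,
		    memory_order_acquire, memory_order_relaxed))
	    expected = 0;
	}

     On AArch64 such a weak compare-exchange typically compiles down to
     exactly this kind of LDAXR/STXR retry loop.  */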
2144
2145 p += emit_mov_addr (p, x0, lockaddr);
2146 p += emit_mov (p, x1, register_operand (sp));
2147
2148 p += emit_sevl (p);
2149 p += emit_wfe (p);
2150 p += emit_ldaxr (p, x2, x0);
2151 p += emit_cb (p, 1, w2, -2 * 4);
2152 p += emit_stxr (p, w2, x1, x0);
2153 p += emit_cb (p, 1, x2, -4 * 4);
2154
2155 /* Call collector (struct tracepoint *, unsigned char *):
2156
2157 MOV x0, #(tpoint)
2158 ...
2159
2160 ; Saved registers start after the collecting_t object.
2161 ADD x1, sp, #16
2162
2163 ; We use an intra-procedure-call scratch register.
2164 MOV ip0, #(collector)
2165 ...
2166
2167 ; And call back to C!
2168 BLR ip0
2169
2170 */
2171
2172 p += emit_mov_addr (p, x0, tpoint);
2173 p += emit_add (p, x1, sp, immediate_operand (16));
2174
2175 p += emit_mov_addr (p, ip0, collector);
2176 p += emit_blr (p, ip0);
2177
2178 /* Release the lock.
2179
2180 MOV x0, #(lockaddr)
2181 ...
2182
2183 ; This instruction is a normal store with memory ordering
2184 ; constraints. Thanks to this we do not have to put a data
2185 ; barrier instruction to make sure all data reads and writes are done
30baf67b 2186 ; before this instruction is executed. Furthermore, this instruction
2187 ; will trigger an event, letting other threads know they can grab
2188 ; the lock.
2189 STLR xzr, [x0]
2190
2191 */
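  /* Editor's note (added): in C11 terms the release is roughly
     "atomic_store_explicit (lock, 0, memory_order_release)"; the
     release ordering is what lets STLR stand in for an explicit data
     barrier here.  */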
2192 p += emit_mov_addr (p, x0, lockaddr);
2193 p += emit_stlr (p, xzr, x0);
2194
2195 /* Free collecting_t object:
2196
2197 ADD sp, sp, #16
2198
2199 */
2200 p += emit_add (p, sp, sp, immediate_operand (16));
2201
2202 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2203 registers from the stack.
2204
2205 LDR x2, [sp, #(2 * 16)]
2206 LDR x1, [sp, #(1 * 16)]
2207 LDR x0, [sp, #(0 * 16)]
2208
2209 MSR NZCV, x2
2210 MSR FPSR, x1
2211 MSR FPCR, x0
2212
2213 ADD sp, sp, #(5 * 16)
2214
2215 */
2216 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2217 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2218 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2219 p += emit_msr (p, NZCV, x2);
2220 p += emit_msr (p, FPSR, x1);
2221 p += emit_msr (p, FPCR, x0);
2222
2223 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2224
2225 /* Pop general purpose registers:
2226
2227 LDR x0, [sp]
2228 ...
2229 LDR x30, [sp, #(30 * 16)]
2230
2231 ADD sp, sp, #(31 * 16)
2232
2233 */
2234 for (i = 0; i <= 30; i += 1)
2235 p += emit_ldr (p, aarch64_register (i, 1), sp,
2236 offset_memory_operand (i * 16));
2237 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2238
2239 /* Pop SIMD&FP registers:
2240
2241 LDP q0, q1, [sp]
2242 ...
2243 LDP q30, q31, [sp, #(30 * 16)]
2244
2245 ADD sp, sp, #(32 * 16)
2246
2247 */
2248 for (i = 0; i <= 30; i += 2)
2249 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2250 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2251
2252 /* Write the code into the inferior memory. */
2253 append_insns (&buildaddr, p - buf, buf);
2254
2255 /* Now emit the relocated instruction. */
2256 *adjusted_insn_addr = buildaddr;
70b439f0 2257 target_read_uint32 (tpaddr, &insn);
2258
2259 insn_data.base.insn_addr = tpaddr;
2260 insn_data.new_addr = buildaddr;
2261 insn_data.insn_ptr = buf;
2262
2263 aarch64_relocate_instruction (insn, &visitor,
2264 (struct aarch64_insn_data *) &insn_data);
2265
bb903df0 2266 /* We may not have been able to relocate the instruction. */
0badd99f 2267 if (insn_data.insn_ptr == buf)
2268 {
2269 sprintf (err,
2270 "E.Could not relocate instruction from %s to %s.",
2271 core_addr_to_string_nz (tpaddr),
2272 core_addr_to_string_nz (buildaddr));
2273 return 1;
2274 }
dfaffe9d 2275 else
0badd99f 2276 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2277 *adjusted_insn_addr_end = buildaddr;
2278
2279 /* Go back to the start of the buffer. */
2280 p = buf;
2281
2282 /* Emit a branch back from the jump pad. */
2283 offset = (tpaddr + orig_size - buildaddr);
2284 if (!can_encode_int32 (offset, 28))
2285 {
2286 sprintf (err,
2287 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2288 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2289 offset);
2290 return 1;
2291 }
2292
2293 p += emit_b (p, 0, offset);
2294 append_insns (&buildaddr, p - buf, buf);
2295
2296 /* Give the caller a branch instruction into the jump pad. */
2297 offset = (*jump_entry - tpaddr);
2298 if (!can_encode_int32 (offset, 28))
2299 {
2300 sprintf (err,
2301 "E.Jump pad too far from tracepoint "
2ac09a5b 2302 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2303 offset);
2304 return 1;
2305 }
2306
2307 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2308 *jjump_pad_insn_size = 4;
2309
2310 /* Return the end address of our pad. */
2311 *jump_entry = buildaddr;
2312
2313 return 0;
2314}
2315
2316/* Helper function writing LEN instructions from START into
2317 current_insn_ptr. */
2318
2319static void
2320emit_ops_insns (const uint32_t *start, int len)
2321{
2322 CORE_ADDR buildaddr = current_insn_ptr;
2323
2324 if (debug_threads)
2325 debug_printf ("Adding %d instructions at %s\n",
2326 len, paddress (buildaddr));
2327
2328 append_insns (&buildaddr, len, start);
2329 current_insn_ptr = buildaddr;
2330}
2331
2332/* Pop a register from the stack. */
2333
2334static int
2335emit_pop (uint32_t *buf, struct aarch64_register rt)
2336{
2337 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2338}
2339
2340/* Push a register on the stack. */
2341
2342static int
2343emit_push (uint32_t *buf, struct aarch64_register rt)
2344{
2345 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2346}
2347
2348/* Implementation of emit_ops method "emit_prologue". */
2349
2350static void
2351aarch64_emit_prologue (void)
2352{
2353 uint32_t buf[16];
2354 uint32_t *p = buf;
2355
2356 /* This function emits a prologue for the following function prototype:
2357
2358 enum eval_result_type f (unsigned char *regs,
2359 ULONGEST *value);
2360
2361 The first argument is a buffer of raw registers. The second
2362 argument is a pointer through which the result of evaluating
2363 the expression is returned; it is set to whatever is on top of
2364 the stack at the end.
2365
2366 The stack set up by the prologue is as such:
2367
2368 High *------------------------------------------------------*
2369 | LR |
2370 | FP | <- FP
2371 | x1 (ULONGEST *value) |
2372 | x0 (unsigned char *regs) |
2373 Low *------------------------------------------------------*
2374
2375 As we are implementing a stack machine, each opcode can expand the
2376 stack so we never know how far we are from the data saved by this
2377 prologue. In order to be able to refer to value and regs later, we save
2378 the current stack pointer in the frame pointer. This way, it is not
2379 clobbered when calling C functions.
2380
30baf67b 2381 Finally, throughout every operation, we use register x0 as the
2382 top of the stack, and x1 as a scratch register. */
2383
2384 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2385 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2386 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2387
2388 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2389
2390
2391 emit_ops_insns (buf, p - buf);
2392}
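/* Editor's note (added): with the frame above, compiled expressions
   reach their arguments at fixed FP-relative offsets no matter how far
   the operand stack grows: regs (the saved x0) lives at FP - 16 and
   value (the saved x1) at FP - 8, which is exactly what
   aarch64_emit_reg and aarch64_emit_epilogue compute by subtracting
   from FP.  */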
2393
2394/* Implementation of emit_ops method "emit_epilogue". */
2395
2396static void
2397aarch64_emit_epilogue (void)
2398{
2399 uint32_t buf[16];
2400 uint32_t *p = buf;
2401
2402 /* Store the result of the expression (x0) in *value. */
2403 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2404 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2405 p += emit_str (p, x0, x1, offset_memory_operand (0));
2406
2407 /* Restore the previous state. */
2408 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2409 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2410
2411 /* Return expr_eval_no_error. */
2412 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2413 p += emit_ret (p, lr);
2414
2415 emit_ops_insns (buf, p - buf);
2416}
2417
2418/* Implementation of emit_ops method "emit_add". */
2419
2420static void
2421aarch64_emit_add (void)
2422{
2423 uint32_t buf[16];
2424 uint32_t *p = buf;
2425
2426 p += emit_pop (p, x1);
45e3745e 2427 p += emit_add (p, x0, x1, register_operand (x0));
2428
2429 emit_ops_insns (buf, p - buf);
2430}
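/* Editor's note (added): every two-operand opcode below follows the
   same stack-machine convention: x0 caches the top of the stack, the
   second operand is popped into x1, and the result replaces the top in
   x0.  For "add" the emitted sequence is therefore:

	LDR x1, [sp], #16	; pop
	ADD x0, x1, x0
*/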
2431
2432/* Implementation of emit_ops method "emit_sub". */
2433
2434static void
2435aarch64_emit_sub (void)
2436{
2437 uint32_t buf[16];
2438 uint32_t *p = buf;
2439
2440 p += emit_pop (p, x1);
45e3745e 2441 p += emit_sub (p, x0, x1, register_operand (x0));
2442
2443 emit_ops_insns (buf, p - buf);
2444}
2445
2446/* Implementation of emit_ops method "emit_mul". */
2447
2448static void
2449aarch64_emit_mul (void)
2450{
2451 uint32_t buf[16];
2452 uint32_t *p = buf;
2453
2454 p += emit_pop (p, x1);
2455 p += emit_mul (p, x0, x1, x0);
2456
2457 emit_ops_insns (buf, p - buf);
2458}
2459
2460/* Implementation of emit_ops method "emit_lsh". */
2461
2462static void
2463aarch64_emit_lsh (void)
2464{
2465 uint32_t buf[16];
2466 uint32_t *p = buf;
2467
2468 p += emit_pop (p, x1);
2469 p += emit_lslv (p, x0, x1, x0);
2470
2471 emit_ops_insns (buf, p - buf);
2472}
2473
2474/* Implementation of emit_ops method "emit_rsh_signed". */
2475
2476static void
2477aarch64_emit_rsh_signed (void)
2478{
2479 uint32_t buf[16];
2480 uint32_t *p = buf;
2481
2482 p += emit_pop (p, x1);
2483 p += emit_asrv (p, x0, x1, x0);
2484
2485 emit_ops_insns (buf, p - buf);
2486}
2487
2488/* Implementation of emit_ops method "emit_rsh_unsigned". */
2489
2490static void
2491aarch64_emit_rsh_unsigned (void)
2492{
2493 uint32_t buf[16];
2494 uint32_t *p = buf;
2495
2496 p += emit_pop (p, x1);
2497 p += emit_lsrv (p, x0, x1, x0);
2498
2499 emit_ops_insns (buf, p - buf);
2500}
2501
2502/* Implementation of emit_ops method "emit_ext". */
2503
2504static void
2505aarch64_emit_ext (int arg)
2506{
2507 uint32_t buf[16];
2508 uint32_t *p = buf;
2509
2510 p += emit_sbfx (p, x0, x0, 0, arg);
2511
2512 emit_ops_insns (buf, p - buf);
2513}
2514
2515/* Implementation of emit_ops method "emit_log_not". */
2516
2517static void
2518aarch64_emit_log_not (void)
2519{
2520 uint32_t buf[16];
2521 uint32_t *p = buf;
2522
2523 /* If the top of the stack is 0, replace it with 1. Else replace it with
2524 0. */
2525
2526 p += emit_cmp (p, x0, immediate_operand (0));
2527 p += emit_cset (p, x0, EQ);
2528
2529 emit_ops_insns (buf, p - buf);
2530}
2531
2532/* Implementation of emit_ops method "emit_bit_and". */
2533
2534static void
2535aarch64_emit_bit_and (void)
2536{
2537 uint32_t buf[16];
2538 uint32_t *p = buf;
2539
2540 p += emit_pop (p, x1);
2541 p += emit_and (p, x0, x0, x1);
2542
2543 emit_ops_insns (buf, p - buf);
2544}
2545
2546/* Implementation of emit_ops method "emit_bit_or". */
2547
2548static void
2549aarch64_emit_bit_or (void)
2550{
2551 uint32_t buf[16];
2552 uint32_t *p = buf;
2553
2554 p += emit_pop (p, x1);
2555 p += emit_orr (p, x0, x0, x1);
2556
2557 emit_ops_insns (buf, p - buf);
2558}
2559
2560/* Implementation of emit_ops method "emit_bit_xor". */
2561
2562static void
2563aarch64_emit_bit_xor (void)
2564{
2565 uint32_t buf[16];
2566 uint32_t *p = buf;
2567
2568 p += emit_pop (p, x1);
2569 p += emit_eor (p, x0, x0, x1);
2570
2571 emit_ops_insns (buf, p - buf);
2572}
2573
2574/* Implementation of emit_ops method "emit_bit_not". */
2575
2576static void
2577aarch64_emit_bit_not (void)
2578{
2579 uint32_t buf[16];
2580 uint32_t *p = buf;
2581
2582 p += emit_mvn (p, x0, x0);
2583
2584 emit_ops_insns (buf, p - buf);
2585}
2586
2587/* Implementation of emit_ops method "emit_equal". */
2588
2589static void
2590aarch64_emit_equal (void)
2591{
2592 uint32_t buf[16];
2593 uint32_t *p = buf;
2594
2595 p += emit_pop (p, x1);
2596 p += emit_cmp (p, x0, register_operand (x1));
2597 p += emit_cset (p, x0, EQ);
2598
2599 emit_ops_insns (buf, p - buf);
2600}
2601
2602/* Implementation of emit_ops method "emit_less_signed". */
2603
2604static void
2605aarch64_emit_less_signed (void)
2606{
2607 uint32_t buf[16];
2608 uint32_t *p = buf;
2609
2610 p += emit_pop (p, x1);
2611 p += emit_cmp (p, x1, register_operand (x0));
2612 p += emit_cset (p, x0, LT);
2613
2614 emit_ops_insns (buf, p - buf);
2615}
2616
2617/* Implementation of emit_ops method "emit_less_unsigned". */
2618
2619static void
2620aarch64_emit_less_unsigned (void)
2621{
2622 uint32_t buf[16];
2623 uint32_t *p = buf;
2624
2625 p += emit_pop (p, x1);
2626 p += emit_cmp (p, x1, register_operand (x0));
2627 p += emit_cset (p, x0, LO);
2628
2629 emit_ops_insns (buf, p - buf);
2630}
2631
2632/* Implementation of emit_ops method "emit_ref". */
2633
2634static void
2635aarch64_emit_ref (int size)
2636{
2637 uint32_t buf[16];
2638 uint32_t *p = buf;
2639
2640 switch (size)
2641 {
2642 case 1:
2643 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2644 break;
2645 case 2:
2646 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2647 break;
2648 case 4:
2649 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2650 break;
2651 case 8:
2652 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2653 break;
2654 default:
2655 /* Unknown size, bail on compilation. */
2656 emit_error = 1;
2657 break;
2658 }
2659
2660 emit_ops_insns (buf, p - buf);
2661}
2662
2663/* Implementation of emit_ops method "emit_if_goto". */
2664
2665static void
2666aarch64_emit_if_goto (int *offset_p, int *size_p)
2667{
2668 uint32_t buf[16];
2669 uint32_t *p = buf;
2670
2671 /* The Z flag is set or cleared here. */
2672 p += emit_cmp (p, x0, immediate_operand (0));
2673 /* This instruction must not change the Z flag. */
2674 p += emit_pop (p, x0);
2675 /* Branch over the next instruction if x0 == 0. */
2676 p += emit_bcond (p, EQ, 8);
2677
2678 /* The NOP instruction will be patched with an unconditional branch. */
2679 if (offset_p)
2680 *offset_p = (p - buf) * 4;
2681 if (size_p)
2682 *size_p = 4;
2683 p += emit_nop (p);
2684
2685 emit_ops_insns (buf, p - buf);
2686}
2687
2688/* Implementation of emit_ops method "emit_goto". */
2689
2690static void
2691aarch64_emit_goto (int *offset_p, int *size_p)
2692{
2693 uint32_t buf[16];
2694 uint32_t *p = buf;
2695
2696 /* The NOP instruction will be patched with an unconditional branch. */
2697 if (offset_p)
2698 *offset_p = 0;
2699 if (size_p)
2700 *size_p = 4;
2701 p += emit_nop (p);
2702
2703 emit_ops_insns (buf, p - buf);
2704}
2705
2706/* Implementation of emit_ops method "write_goto_address". */
2707
bb1183e2 2708static void
2709aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2710{
2711 uint32_t insn;
2712
2713 emit_b (&insn, 0, to - from);
2714 append_insns (&from, 1, &insn);
2715}
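/* Editor's sketch (added, hypothetical usage): the generic bytecode
   compiler is assumed to pair the two hooks above roughly as follows,
   with "start" and "dest" as illustrative names:

	int offset, size;
	aarch64_emit_if_goto (&offset, &size);	   reserves the NOP
	CORE_ADDR patch_site = start + offset;
	... emit the opcodes in between ...
	aarch64_write_goto_address (patch_site, dest, size);

   overwriting the reserved NOP with a real B once the destination is
   known.  */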
2716
2717/* Implementation of emit_ops method "emit_const". */
2718
2719static void
2720aarch64_emit_const (LONGEST num)
2721{
2722 uint32_t buf[16];
2723 uint32_t *p = buf;
2724
2725 p += emit_mov_addr (p, x0, num);
2726
2727 emit_ops_insns (buf, p - buf);
2728}
2729
2730/* Implementation of emit_ops method "emit_call". */
2731
2732static void
2733aarch64_emit_call (CORE_ADDR fn)
2734{
2735 uint32_t buf[16];
2736 uint32_t *p = buf;
2737
2738 p += emit_mov_addr (p, ip0, fn);
2739 p += emit_blr (p, ip0);
2740
2741 emit_ops_insns (buf, p - buf);
2742}
2743
2744/* Implementation of emit_ops method "emit_reg". */
2745
2746static void
2747aarch64_emit_reg (int reg)
2748{
2749 uint32_t buf[16];
2750 uint32_t *p = buf;
2751
2752 /* Set x0 to unsigned char *regs. */
2753 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2754 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2755 p += emit_mov (p, x1, immediate_operand (reg));
2756
2757 emit_ops_insns (buf, p - buf);
2758
2759 aarch64_emit_call (get_raw_reg_func_addr ());
2760}
2761
2762/* Implementation of emit_ops method "emit_pop". */
2763
2764static void
2765aarch64_emit_pop (void)
2766{
2767 uint32_t buf[16];
2768 uint32_t *p = buf;
2769
2770 p += emit_pop (p, x0);
2771
2772 emit_ops_insns (buf, p - buf);
2773}
2774
2775/* Implementation of emit_ops method "emit_stack_flush". */
2776
2777static void
2778aarch64_emit_stack_flush (void)
2779{
2780 uint32_t buf[16];
2781 uint32_t *p = buf;
2782
2783 p += emit_push (p, x0);
2784
2785 emit_ops_insns (buf, p - buf);
2786}
2787
2788/* Implementation of emit_ops method "emit_zero_ext". */
2789
2790static void
2791aarch64_emit_zero_ext (int arg)
2792{
2793 uint32_t buf[16];
2794 uint32_t *p = buf;
2795
2796 p += emit_ubfx (p, x0, x0, 0, arg);
2797
2798 emit_ops_insns (buf, p - buf);
2799}
2800
2801/* Implementation of emit_ops method "emit_swap". */
2802
2803static void
2804aarch64_emit_swap (void)
2805{
2806 uint32_t buf[16];
2807 uint32_t *p = buf;
2808
2809 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2810 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2811 p += emit_mov (p, x0, register_operand (x1));
2812
2813 emit_ops_insns (buf, p - buf);
2814}
2815
2816/* Implementation of emit_ops method "emit_stack_adjust". */
2817
2818static void
2819aarch64_emit_stack_adjust (int n)
2820{
2821 /* This is not needed with our design. */
2822 uint32_t buf[16];
2823 uint32_t *p = buf;
2824
2825 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2826
2827 emit_ops_insns (buf, p - buf);
2828}
2829
2830/* Implementation of emit_ops method "emit_int_call_1". */
2831
2832static void
2833aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2834{
2835 uint32_t buf[16];
2836 uint32_t *p = buf;
2837
2838 p += emit_mov (p, x0, immediate_operand (arg1));
2839
2840 emit_ops_insns (buf, p - buf);
2841
2842 aarch64_emit_call (fn);
2843}
2844
2845/* Implementation of emit_ops method "emit_void_call_2". */
2846
2847static void
2848aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2849{
2850 uint32_t buf[16];
2851 uint32_t *p = buf;
2852
2853 /* Push x0 on the stack. */
2854 aarch64_emit_stack_flush ();
2855
2856 /* Setup arguments for the function call:
2857
2858 x0: arg1
2859 x1: top of the stack
2860
2861 MOV x1, x0
2862 MOV x0, #arg1 */
2863
2864 p += emit_mov (p, x1, register_operand (x0));
2865 p += emit_mov (p, x0, immediate_operand (arg1));
2866
2867 emit_ops_insns (buf, p - buf);
2868
2869 aarch64_emit_call (fn);
2870
2871 /* Restore x0. */
2872 aarch64_emit_pop ();
2873}
2874
2875/* Implementation of emit_ops method "emit_eq_goto". */
2876
2877static void
2878aarch64_emit_eq_goto (int *offset_p, int *size_p)
2879{
2880 uint32_t buf[16];
2881 uint32_t *p = buf;
2882
2883 p += emit_pop (p, x1);
2884 p += emit_cmp (p, x1, register_operand (x0));
2885 /* Branch over the next instruction if x0 != x1. */
2886 p += emit_bcond (p, NE, 8);
2887 /* The NOP instruction will be patched with an unconditional branch. */
2888 if (offset_p)
2889 *offset_p = (p - buf) * 4;
2890 if (size_p)
2891 *size_p = 4;
2892 p += emit_nop (p);
2893
2894 emit_ops_insns (buf, p - buf);
2895}
2896
2897/* Implementation of emit_ops method "emit_ne_goto". */
2898
2899static void
2900aarch64_emit_ne_goto (int *offset_p, int *size_p)
2901{
2902 uint32_t buf[16];
2903 uint32_t *p = buf;
2904
2905 p += emit_pop (p, x1);
2906 p += emit_cmp (p, x1, register_operand (x0));
2907 /* Branch over the next instruction if x0 == x1. */
2908 p += emit_bcond (p, EQ, 8);
2909 /* The NOP instruction will be patched with an unconditional branch. */
2910 if (offset_p)
2911 *offset_p = (p - buf) * 4;
2912 if (size_p)
2913 *size_p = 4;
2914 p += emit_nop (p);
2915
2916 emit_ops_insns (buf, p - buf);
2917}
2918
2919/* Implementation of emit_ops method "emit_lt_goto". */
2920
2921static void
2922aarch64_emit_lt_goto (int *offset_p, int *size_p)
2923{
2924 uint32_t buf[16];
2925 uint32_t *p = buf;
2926
2927 p += emit_pop (p, x1);
2928 p += emit_cmp (p, x1, register_operand (x0));
2929 /* Branch over the next instruction if x0 >= x1. */
2930 p += emit_bcond (p, GE, 8);
2931 /* The NOP instruction will be patched with an unconditional branch. */
2932 if (offset_p)
2933 *offset_p = (p - buf) * 4;
2934 if (size_p)
2935 *size_p = 4;
2936 p += emit_nop (p);
2937
2938 emit_ops_insns (buf, p - buf);
2939}
2940
2941/* Implementation of emit_ops method "emit_le_goto". */
2942
2943static void
2944aarch64_emit_le_goto (int *offset_p, int *size_p)
2945{
2946 uint32_t buf[16];
2947 uint32_t *p = buf;
2948
2949 p += emit_pop (p, x1);
2950 p += emit_cmp (p, x1, register_operand (x0));
2951 /* Branch over the next instruction if x0 > x1. */
2952 p += emit_bcond (p, GT, 8);
2953 /* The NOP instruction will be patched with an unconditional branch. */
2954 if (offset_p)
2955 *offset_p = (p - buf) * 4;
2956 if (size_p)
2957 *size_p = 4;
2958 p += emit_nop (p);
2959
2960 emit_ops_insns (buf, p - buf);
2961}
2962
2963/* Implementation of emit_ops method "emit_gt_goto". */
2964
2965static void
2966aarch64_emit_gt_goto (int *offset_p, int *size_p)
2967{
2968 uint32_t buf[16];
2969 uint32_t *p = buf;
2970
2971 p += emit_pop (p, x1);
2972 p += emit_cmp (p, x1, register_operand (x0));
2973 /* Branch over the next instruction if x0 <= x1. */
2974 p += emit_bcond (p, LE, 8);
2975 /* The NOP instruction will be patched with an unconditional branch. */
2976 if (offset_p)
2977 *offset_p = (p - buf) * 4;
2978 if (size_p)
2979 *size_p = 4;
2980 p += emit_nop (p);
2981
2982 emit_ops_insns (buf, p - buf);
2983}
2984
2985 /* Implementation of emit_ops method "emit_ge_goto". */
2986
2987static void
2988 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2989{
2990 uint32_t buf[16];
2991 uint32_t *p = buf;
2992
2993 p += emit_pop (p, x1);
2994 p += emit_cmp (p, x1, register_operand (x0));
2995 /* Branch over the next instruction if x0 < x1. */
2996 p += emit_bcond (p, LT, 8);
2997 /* The NOP instruction will be patched with an unconditional branch. */
2998 if (offset_p)
2999 *offset_p = (p - buf) * 4;
3000 if (size_p)
3001 *size_p = 4;
3002 p += emit_nop (p);
3003
3004 emit_ops_insns (buf, p - buf);
3005}
3006
3007static struct emit_ops aarch64_emit_ops_impl =
3008{
3009 aarch64_emit_prologue,
3010 aarch64_emit_epilogue,
3011 aarch64_emit_add,
3012 aarch64_emit_sub,
3013 aarch64_emit_mul,
3014 aarch64_emit_lsh,
3015 aarch64_emit_rsh_signed,
3016 aarch64_emit_rsh_unsigned,
3017 aarch64_emit_ext,
3018 aarch64_emit_log_not,
3019 aarch64_emit_bit_and,
3020 aarch64_emit_bit_or,
3021 aarch64_emit_bit_xor,
3022 aarch64_emit_bit_not,
3023 aarch64_emit_equal,
3024 aarch64_emit_less_signed,
3025 aarch64_emit_less_unsigned,
3026 aarch64_emit_ref,
3027 aarch64_emit_if_goto,
3028 aarch64_emit_goto,
3029 aarch64_write_goto_address,
3030 aarch64_emit_const,
3031 aarch64_emit_call,
3032 aarch64_emit_reg,
3033 aarch64_emit_pop,
3034 aarch64_emit_stack_flush,
3035 aarch64_emit_zero_ext,
3036 aarch64_emit_swap,
3037 aarch64_emit_stack_adjust,
3038 aarch64_emit_int_call_1,
3039 aarch64_emit_void_call_2,
3040 aarch64_emit_eq_goto,
3041 aarch64_emit_ne_goto,
3042 aarch64_emit_lt_goto,
3043 aarch64_emit_le_goto,
3044 aarch64_emit_gt_goto,
3045 aarch64_emit_ge_goto,
3046};
3047
3048/* Implementation of linux_target_ops method "emit_ops". */
3049
3050static struct emit_ops *
3051aarch64_emit_ops (void)
3052{
3053 return &aarch64_emit_ops_impl;
3054}
3055
3056/* Implementation of linux_target_ops method
3057 "get_min_fast_tracepoint_insn_len". */
3058
3059static int
3060aarch64_get_min_fast_tracepoint_insn_len (void)
3061{
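  /* Editor's note (added): every AArch64 instruction is 4 bytes wide,
     so the jump to the pad only ever needs to replace a single
     instruction.  */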
3062 return 4;
3063}
3064
3065/* Implementation of linux_target_ops method "supports_range_stepping". */
3066
3067static int
3068aarch64_supports_range_stepping (void)
3069{
3070 return 1;
3071}
3072
3ca4edb6 3073/* Implementation of target ops method "sw_breakpoint_from_kind". */
dd373349 3074
3075const gdb_byte *
3076aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
dd373349 3077{
3078 if (is_64bit_tdesc ())
3079 {
3080 *size = aarch64_breakpoint_len;
3081 return aarch64_breakpoint;
3082 }
3083 else
3084 return arm_sw_breakpoint_from_kind (kind, size);
3085}
3086
06250e4e 3087/* Implementation of target ops method "breakpoint_kind_from_pc". */
17b1509a 3088
3089int
3090aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3091{
3092 if (is_64bit_tdesc ())
3093 return aarch64_breakpoint_len;
3094 else
3095 return arm_breakpoint_kind_from_pc (pcptr);
3096}
3097
06250e4e 3098/* Implementation of the target ops method
3099 "breakpoint_kind_from_current_state". */
3100
3101int
3102aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3103{
3104 if (is_64bit_tdesc ())
3105 return aarch64_breakpoint_len;
3106 else
3107 return arm_breakpoint_kind_from_current_state (pcptr);
3108}
3109
3110/* Support for hardware single step. */
3111
3112static int
3113aarch64_supports_hardware_single_step (void)
3114{
3115 return 1;
3116}
3117
3118struct linux_target_ops the_low_target =
3119{
176eb98c 3120 aarch64_linux_new_process,
04ec7890 3121 aarch64_linux_delete_process,
176eb98c 3122 aarch64_linux_new_thread,
466eecee 3123 aarch64_linux_delete_thread,
3a8a0396 3124 aarch64_linux_new_fork,
176eb98c 3125 aarch64_linux_prepare_to_resume,
421530db 3126 NULL, /* process_qsupported */
7671bf47 3127 aarch64_supports_tracepoints,
3128 aarch64_get_thread_area,
3129 aarch64_install_fast_tracepoint_jump_pad,
afbe19f8 3130 aarch64_emit_ops,
bb903df0 3131 aarch64_get_min_fast_tracepoint_insn_len,
d1d0aea1 3132 aarch64_supports_range_stepping,
7d00775e 3133 aarch64_supports_hardware_single_step,
061fc021 3134 aarch64_get_syscall_trapinfo,
176eb98c 3135};
3aee8918 3136
3137/* The linux target ops object. */
3138
3139linux_process_target *the_linux_target = &the_aarch64_target;
3140
3141void
3142initialize_low_arch (void)
3143{
3144 initialize_low_arch_aarch32 ();
3145
3aee8918 3146 initialize_regsets_info (&aarch64_regsets_info);
02895270 3147 initialize_regsets_info (&aarch64_sve_regsets_info);
3aee8918 3148}