gdbserver/linux-low: turn watchpoint ops into methods
gdbserver/linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache's target description contains the SVE
   registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
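
/* An illustrative aside (added for this write-up, not from the
   original sources): the bytes above are the little-endian encoding
   of BRK #0, whose 32-bit encoding is 0xd4200000.

     uint32_t insn;
     memcpy (&insn, aarch64_breakpoint, sizeof (insn));
     // On a little-endian host, insn == 0xd4200000 (BRK #0).
*/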

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
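
/* Background note (a protocol recap added here, not from the original
   sources): these Z_PACKET_* constants correspond to the remote
   protocol's Z0 (software breakpoint), Z1 (hardware breakpoint),
   Z2 (write watchpoint), Z3 (read watchpoint) and Z4 (access
   watchpoint) packets, so a "Z2,addr,len" request from GDB reaches
   this method with z_type == '2'.  */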

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		         |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};
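
/* A minimal sketch of how one of the regset entries above is
   exercised (assumed mechanics; the generic code in linux-low.cc is
   what actually drives this, and "tid" here is a hypothetical thread
   id):

     struct user_pt_regs regs;
     struct iovec iov = { &regs, sizeof (regs) };

     if (ptrace (PTRACE_GETREGSET, tid, NT_PRSTATUS, &iov) == 0)
       aarch64_store_gregset (regcache, &regs);
*/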

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
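
/* A worked example (added for illustration): expanding the TPIDR_EL0
   entry above gives

     (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
   = 0x4000     | 0x1800      | 0x0680     | 0x0        | 0x2
   = 0x5e82

   which is the 15-bit field that emit_mrs and emit_msr below place at
   bit 5 of the instruction word.  */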

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
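
/* A worked example (added for illustration, using a made-up address):
   for ADDR == 0x0000fffff7de1234, emit_mov_addr emits

     MOV  xd, #0x1234
     MOVK xd, #0xf7de, lsl #16
     MOVK xd, #0xffff, lsl #32

   and returns 3; the final MOVK is skipped because bits 48..63 of
   ADDR are zero.  */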

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction places rn in rd if the condition is true, and
   rm + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
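
/* A usage sketch (hedged; "scratch" and "to" are hypothetical
   variables introduced for this example, not part of this file):
   build a tiny sequence with the emit_* helpers above, then copy it
   into the inferior and advance the write pointer.

     uint32_t scratch[2];
     uint32_t *p = scratch;
     CORE_ADDR to = ...;  // some inferior buffer address

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&to, p - scratch, scratch);
*/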

/* Sub-class of struct aarch64_insn_data, storing information about an
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
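
/* A hedged sketch of how this structure is threaded through the
   visitor (assumed flow; the actual driver and the visitor object
   wiring the aarch64_ftrace_insn_reloc_* callbacks together live
   outside this excerpt):

     struct aarch64_insn_relocation_data data;

     data.base.insn_addr = oldloc;  // original instruction address
     data.new_addr = newloc;        // address it is relocated to
     data.insn_ptr = buf;           // relocated output goes here

     aarch64_relocate_instruction (insn, &visitor, &data.base);
*/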
1713
1714/* Implementation of aarch64_insn_visitor method "b". */
1715
1716static void
1717aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1718 struct aarch64_insn_data *data)
1719{
1720 struct aarch64_insn_relocation_data *insn_reloc
1721 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1722 int64_t new_offset
0badd99f
YQ
1723 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1724
1725 if (can_encode_int32 (new_offset, 28))
1726 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1727}
1728
1729/* Implementation of aarch64_insn_visitor method "b_cond". */
1730
1731static void
1732aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1733 struct aarch64_insn_data *data)
1734{
1735 struct aarch64_insn_relocation_data *insn_reloc
1736 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1737 int64_t new_offset
0badd99f
YQ
1738 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1739
1740 if (can_encode_int32 (new_offset, 21))
1741 {
1742 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1743 new_offset);
bb903df0 1744 }
0badd99f 1745 else if (can_encode_int32 (new_offset, 28))
bb903df0 1746 {
0badd99f
YQ
1747 /* The offset is out of range for a conditional branch
1748 instruction but not for a unconditional branch. We can use
1749 the following instructions instead:
bb903df0 1750
0badd99f
YQ
1751 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1752 B NOT_TAKEN ; Else jump over TAKEN and continue.
1753 TAKEN:
1754 B #(offset - 8)
1755 NOT_TAKEN:
1756
1757 */
1758
1759 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1760 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1761 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1762 }
0badd99f 1763}
bb903df0 1764
0badd99f
YQ
1765/* Implementation of aarch64_insn_visitor method "cb". */
1766
1767static void
1768aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1769 const unsigned rn, int is64,
1770 struct aarch64_insn_data *data)
1771{
1772 struct aarch64_insn_relocation_data *insn_reloc
1773 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1774 int64_t new_offset
0badd99f
YQ
1775 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1776
1777 if (can_encode_int32 (new_offset, 21))
1778 {
1779 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1780 aarch64_register (rn, is64), new_offset);
bb903df0 1781 }
0badd99f 1782 else if (can_encode_int32 (new_offset, 28))
bb903df0 1783 {
0badd99f
YQ
1784 /* The offset is out of range for a compare and branch
1785 instruction but not for a unconditional branch. We can use
1786 the following instructions instead:
1787
1788 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1789 B NOT_TAKEN ; Else jump over TAKEN and continue.
1790 TAKEN:
1791 B #(offset - 8)
1792 NOT_TAKEN:
1793
1794 */
1795 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1796 aarch64_register (rn, is64), 8);
1797 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1798 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1799 }
1800}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN           ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
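
/* This visitor is handed to aarch64_relocate_instruction below; each
   method re-emits one class of PC-relative instruction at its new
   address, advancing insn_reloc->insn_ptr only on success.  */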

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
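
  /* In total the pad reserves (32 + 31 + 5 + 1) * 16 = 1104 bytes
     below the SP value that was live when the tracepoint was hit.  */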

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));


  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

  */
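  /* (32 + 31 + 5) * 16 undoes the three SUBs above, so x4 receives
     the stack pointer value from the moment the tracepoint was hit.  */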
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

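  /* Roughly equivalent C for the loop just emitted; the helper names
     are invented for illustration and do not exist in this file:

       for (;;)
         {
           wfe ();                                       // WFE
           if (load_acquire_exclusive (lockaddr) != 0)   // LDAXR
             continue;        // Lock held by another thread.
           if (store_exclusive (lockaddr, sp) == 0)      // STXR
             break;           // Status 0 means we own the lock.
         }
  */
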
  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are
     ; done before this instruction is executed.  Furthermore, this
     ; instruction will trigger an event, letting other threads know
     ; they can grab the lock.
     STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
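
/* Each cell of the compiled-expression stack is 16 bytes wide: only 8
   bytes hold the value, but SP must stay 16-byte aligned on AArch64,
   so a push or pop always moves SP by a whole cell.  */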

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1 (ULONGEST *value)                                 |
          | x0 (unsigned char *regs)                             |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));


  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
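
/* Note the operand order in these binary operations: the popped value
   (x1, pushed earlier) is the left-hand operand and x0, the top of the
   stack, is the right-hand one.  This matters for the non-commutative
   operations such as aarch64_emit_sub below.  */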

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
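
/* This is what eventually patches the NOP placeholders left by
   aarch64_emit_goto and aarch64_emit_if_goto: once the destination is
   known, the placeholder at FROM is overwritten with a real B.  */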

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};
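
/* Illustrative only: the core bytecode compiler drives these hooks one
   agent-expression opcode at a time.  For "a + b" it would emit,
   roughly (a and b standing in for register numbers):

     aarch64_emit_reg (a);            // x0 = value of register a
     aarch64_emit_stack_flush ();     // push x0
     aarch64_emit_reg (b);            // x0 = value of register b
     aarch64_emit_add ();             // x0 = pop () + x0
*/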

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
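
/* A fast tracepoint is planted by overwriting a single 4-byte B
   instruction with the branch into the jump pad (see
   aarch64_install_fast_tracepoint_jump_pad, which sets
   *jjump_pad_insn_size to 4), hence the minimum of 4 bytes.  */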

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}