/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}

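/* Note that low_insert_point and low_remove_point further below only
   update a per-process mirror of the debug registers;
   low_prepare_to_resume above is where any pending mirror changes are
   actually flushed to the inferior, via the shared code behind
   nat/aarch64-linux.h, just before a thread is set running again.  */
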
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description in the regcache contains the
   SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

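/* The fill/store routines below convert between the regcache and the
   kernel regset layouts; they are installed as callbacks in the
   aarch64_regsets tables further down in this file.  */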
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

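/* Clear the per-process mirror of the hardware breakpoint and
   watchpoint registers in STATE.  */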
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: | 4 | 5 | 6 | 7 | 8 |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

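/* Implementations of linux target ops methods "low_new_thread" and
   "low_delete_thread".  Both simply forward to the shared native code
   behind nat/aarch64-linux.h.  */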
void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

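/* Helper function used by emit_stp and emit_ldp below: the opcode
   (STP or LDP), the operand size and the addressing mode (offset,
   pre-index or post-index) are folded into a single 32-bit
   instruction word.  */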
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

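/* For example (a sketch, using a made-up address): for ADDR
   0x0000aaaade42f108, emit_mov_addr emits

     MOV  xd, #0xf108
     MOVK xd, #0xde42, lsl #16
     MOVK xd, #0xaaaa, lsl #32

   and stops there, since bits 48..63 of ADDR are all zero.  */
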
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

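/* As an illustration, emit_mrs (p, x0, TPIDR_EL0) assembles
   "mrs x0, tpidr_el0" (read the software thread ID register into x0),
   and emit_msr (p, NZCV, x1) assembles "msr nzcv, x1" (restore the
   condition flags from x1).  */
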
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

   SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

   UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

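/* For instance, emit_ubfx (p, x0, x1, 0, 16) emits
   UBFM x0, x1, #0, #15, which copies the low 16 bits of x1 into x0
   and zeros the remaining bits.  */
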
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

   CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

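/* A minimal usage sketch (ADDR and TO are hypothetical values):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);    // x0 = addr
     p += emit_blr (p, x0);               // call through x0
     append_insns (&to, p - buf, buf);    // copy to inferior, bump TO

   Each emit_* helper returns the number of 32-bit instructions
   written, so sequences compose by pointer bumping; the relocation
   visitors below build their output the same way.  */
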
/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation during fast tracepoint insertion.  The
   visitor can relocate an instruction from BASE.INSN_ADDR to NEW_ADDR
   and save the relocated instructions in the buffer pointed to by
   INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

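/* Each visitor method below handles one class of PC-relative
   instruction.  It computes the branch displacement from the
   relocated location (NEW_ADDR) back to the original target and
   either re-emits the adjusted instruction or, when the displacement
   no longer fits in the immediate field, an equivalent
   multi-instruction sequence.  */
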
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN           ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

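/* Worked example (illustrative numbers, not taken from the source):
   relocating "ADRP x0, <sym>" originally at 0x400cd4, where the
   decoded page offset makes address = 0x403cd4, the emitted MOV
   sequence materializes 0x403000 (address & ~0xfff) directly into
   x0, regardless of where the relocated copy ends up.  */
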
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

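/* The visitor table above is consumed by aarch64_relocate_instruction
   (see arch/aarch64-insn.c): it decodes a single instruction and calls
   the callback matching its class, as done in
   aarch64_install_fast_tracepoint_jump_pad below.  */
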
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128-bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 | <- SP + 16
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are done
     ; before this instruction is executed.  Furthermore, this instruction
     ; will trigger an event, letting other threads know they can grab
     ; the lock.
     STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

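/* A hedged sketch, assuming C11 <stdatomic.h>: the protocol the pad
   implements above with SEVL/WFE/LDAXR/STXR and STLR is roughly
   equivalent to the following.  Illustrative only and compiled out;
   the names are hypothetical and this is not part of gdbserver.  */
#if 0
static void
collecting_t_lock (atomic_uintptr_t *lock, uintptr_t collecting_obj)
{
  uintptr_t expected = 0;

  /* Spin until we swing the lock word from 0 to the address of our
     collecting_t object; acquire ordering pairs with the release
     store in collecting_t_unlock.  */
  while (!atomic_compare_exchange_weak_explicit
	   (lock, &expected, collecting_obj,
	    memory_order_acquire, memory_order_relaxed))
    expected = 0;
}

static void
collecting_t_unlock (atomic_uintptr_t *lock)
{
  /* Store-release, matching the STLR xzr, [x0] emitted above; on
     AArch64 it also wakes any WFE-parked waiters.  */
  atomic_store_explicit (lock, (uintptr_t) 0, memory_order_release);
}
#endif
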
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
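
/* For reference (a sketch, not emitted verbatim anywhere): with the
   expression stack keeping its top-of-stack value in x0 and one
   16-byte cell per entry, emit_pop and emit_push expand to

     LDR xt, [sp], #16        ; emit_pop: load, then free one cell
     STR xt, [sp, #-16]!      ; emit_push: allocate one cell, then store

   so a binary op such as aarch64_emit_add below compiles to
   "LDR x1, [sp], #16; ADD x0, x1, x0".  */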

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will
     be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
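
/* For example (a sketch; the caller lives in the generic bytecode
   compiler, not in this file): *offset_p from aarch64_emit_goto
   records where the placeholder NOP landed, so once the branch target
   is known the compiler can patch it with

     aarch64_write_goto_address (goto_insn_addr, target_addr, 4);

   which overwrites the NOP with "B #(target_addr - goto_insn_addr)".  */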

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}