/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                          int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
                          "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
                          "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description in the regcache contains the
   SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

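/* Fill BUF, which points to a struct user_pt_regs, with the general
   purpose registers held in REGCACHE.  */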
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

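/* Store the general purpose registers in BUF, which points to a struct
   user_pt_regs, into REGCACHE.  */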
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

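/* Fill BUF, which points to a struct user_fpsimd_state, with the
   floating-point and SIMD registers held in REGCACHE.  */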
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

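/* Store the floating-point and SIMD registers in BUF, which points to
   a struct user_fpsimd_state, into REGCACHE.  */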
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

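/* Implementation of linux target ops method "low_supports_breakpoints".  */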
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}

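/* Clear the reference counts, address and control mirrors of all
   hardware breakpoints and watchpoints in STATE.  */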
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                           |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}

/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
                              process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d (2.6.33) make the child
     inherit the hardware debug registers from the parent on
     fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

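/* Register sets exposed via the ptrace regset interface, for AArch64
   targets without SVE.  */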
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

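/* Register sets for AArch64 targets with SVE: the variable-sized
   NT_ARM_SVE regset carries the vector registers in place of the
   fixed-size FPSIMD regset.  */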
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm           op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

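/* The emit_* helpers below assemble instruction words with the ENCODE
   macro from arch/aarch64-insn.h, which places the low SIZE bits of a
   value at a given bit OFFSET within the 32-bit instruction word.  */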
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

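/* Helper function emitting a load or store pair instruction.  OPCODE
   selects the instruction (LDP or STP), RT and RT2 are the register
   pair, RN the base address register and OPERAND the addressing mode.  */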
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

   MOV  xd, #(addr)
   MOVK xd, #(addr >> 16), lsl #16
   MOVK xd, #(addr >> 32), lsl #32
   MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

   SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

   UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

   CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         B.COND TAKEN    ; If cond is true, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

1791/* Implementation of aarch64_insn_visitor method "cb". */
1792
1793static void
1794aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1795 const unsigned rn, int is64,
1796 struct aarch64_insn_data *data)
1797{
1798 struct aarch64_insn_relocation_data *insn_reloc
1799 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1800 int64_t new_offset
0badd99f
YQ
1801 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1802
1803 if (can_encode_int32 (new_offset, 21))
1804 {
1805 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1806 aarch64_register (rn, is64), new_offset);
bb903df0 1807 }
0badd99f 1808 else if (can_encode_int32 (new_offset, 28))
bb903df0 1809 {
0badd99f
YQ
1810 /* The offset is out of range for a compare and branch
1811 instruction but not for a unconditional branch. We can use
1812 the following instructions instead:
1813
1814 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1815 B NOT_TAKEN ; Else jump over TAKEN and continue.
1816 TAKEN:
1817 B #(offset - 8)
1818 NOT_TAKEN:
1819
1820 */
1821 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1822 aarch64_register (rn, is64), 8);
1823 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1824 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1825 }
1826}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test-bit-and-branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
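
  /* Editorial note: in C terms the lock acquisition above is roughly
     (a sketch, not part of the emitted code; "lock" and "collecting"
     are illustrative names):

       uintptr_t expected = 0;
       while (!__atomic_compare_exchange_n (&lock, &expected,
                                            (uintptr_t) &collecting,
                                            false, __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED))
         expected = 0;   // Spin until the word goes 0 -> &collecting.

     with SEVL/WFE serving only to idle the core between attempts
     instead of busy-spinning.  */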

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are done
     ; before this instruction is executed.  Furthermore, this instruction
     ; will trigger an event, letting other threads know they can grab
     ; the lock.
     STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
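
/* Editorial note: this implementation never populates *TRAMPOLINE /
   *TRAMPOLINE_SIZE, presumably because A64's fixed-width 4-byte
   instructions and 28-bit branch range let a single B instruction reach
   the jump pad directly; when even that range is exceeded, the function
   reports an error above rather than building an intermediate
   trampoline.  */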

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
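
/* Editorial note: each push/pop above moves SP by a full 16-byte cell
   even though only 8 bytes are stored, keeping SP 16-byte aligned as
   the architecture requires (see the cell-size discussion in the jump
   pad installer above).  */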

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
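
/* Editorial note: by the stack-machine convention above, for a binary
   expression A OP B the earlier operand A is popped into x1 while B
   sits in x0 (the cached top of stack), so the subtraction computes
   x0 = x1 - x0, i.e. A - B, and the result stays on top in x0.  */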

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
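
/* Editorial note: OFFSET_P/SIZE_P report where the placeholder NOP
   lives relative to the start of this opcode's code so that, once the
   bytecode's branch target is known, aarch64_write_goto_address (below)
   can overwrite the NOP with a real unconditional branch.  */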

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
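
/* Editorial note: the emit_ops table above is gdbserver's backend for
   compiling agent-expression bytecode into native AArch64 code, so that
   tracepoint conditions and actions can be evaluated in the inferior
   without a round trip to the debugger.  */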

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
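
/* Editorial note: the is_64bit_tdesc () checks above let a 64-bit
   gdbserver control 32-bit ARM inferiors; for those, breakpoint kinds
   and opcodes are delegated to the aarch32 code in
   linux-aarch32-low.cc.  */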

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}