/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the SVE register set.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

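/* For reference: the bytes above are the little-endian encoding of
   BRK #0 (0xd4200000).  AArch64 instructions are always stored
   little-endian, regardless of the data endianness.  */
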
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
			       aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
	           |---- range watched ----|
	           |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

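/* Note on DIRECTION above: judging from the two branches, 0 converts
   the native siginfo into the inferior's 32-bit compat layout and any
   other value converts back; the generic siginfo transfer code in
   linux-low is the authoritative statement of this contract.  */
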
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

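/* The NT_ARM_TLS regset read above exposes the thread's TPIDR_EL0
   system register, i.e. the same value the thread itself would obtain
   with "MRS x0, TPIDR_EL0".  */
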
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

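/* x8 is the syscall-number register in the AArch64 Linux ABI; r7
   plays the same role for 32-bit AArch32 inferiors.  */
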
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

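/* The emit_* helpers below assemble instruction words with the ENCODE
   macro from arch/aarch64-insn.h, which masks a value to a bit-field of
   the given size and shifts it into place; roughly:

     ENCODE (val, size, offset) == (val & ((1ULL << size) - 1)) << offset

   so for example ENCODE (rn.num, 5, 5) places a 5-bit register number
   at bit 5 of the instruction word.  */
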
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

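/* Note the immediate above is scaled: the 7-bit field stores
   OPERAND.INDEX >> 3, so an offset of 16, for instance, is encoded
   as 2.  This scaling is what limits STP/LDP offsets to the
   -512 .. 504 range documented below.  */
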
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

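/* Together these emitters build the usual load-exclusive /
   store-exclusive retry loop.  As an illustrative sketch (not code
   emitted verbatim by this file), acquiring a spin lock at the address
   in x0 with the value in x2 looks like:

     again:
       LDAXR x1, [x0]       ; load-acquire exclusive
       CBNZ  x1, again      ; lock already held, retry
       STXR  w1, x2, [x0]   ; try to claim it
       CBNZ  w1, again      ; lost the exclusive monitor, retry

   which is the pattern the fast tracepoint jump pad uses to take the
   collecting lock.  */
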
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

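/* For example, emit_mov_addr (buf, x0, 0x12345678) stops after two
   instructions, since the upper 32 bits of the address are zero:

     MOV  x0, #0x5678
     MOVK x0, #0x1234, lsl #16  */
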
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

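/* For instance, emit_ubfx (buf, x0, x1, 8, 8) extracts the second byte
   of x1 into x0, zero-extended; it assembles to UBFM x0, x1, #8, #15.  */
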
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally places either RN or RM plus one in RD:
   RN is chosen if the condition is true, RM + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

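/* The XOR trick works because AArch64 condition codes come in even/odd
   complementary pairs.  E.g. emit_cset (buf, x0, EQ) flips EQ (0x0) to
   NE (0x1) and emits CSINC x0, xzr, xzr, NE: when EQ holds, NE fails,
   so the instruction yields xzr + 1 = 1, as desired.  */
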
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

0badd99f
YQ
1687/* Sub-class of struct aarch64_insn_data, store information of
1688 instruction relocation for fast tracepoint. Visitor can
1689 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1690 the relocated instructions in buffer pointed by INSN_PTR. */
bb903df0 1691
0badd99f
YQ
1692struct aarch64_insn_relocation_data
1693{
1694 struct aarch64_insn_data base;
1695
1696 /* The new address the instruction is relocated to. */
1697 CORE_ADDR new_addr;
1698 /* Pointer to the buffer of relocated instruction(s). */
1699 uint32_t *insn_ptr;
1700};
1701
1702/* Implementation of aarch64_insn_visitor method "b". */
1703
1704static void
1705aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1706 struct aarch64_insn_data *data)
1707{
1708 struct aarch64_insn_relocation_data *insn_reloc
1709 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1710 int64_t new_offset
0badd99f
YQ
1711 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1712
1713 if (can_encode_int32 (new_offset, 28))
1714 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1715}
1716
1717/* Implementation of aarch64_insn_visitor method "b_cond". */
1718
1719static void
1720aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1721 struct aarch64_insn_data *data)
1722{
1723 struct aarch64_insn_relocation_data *insn_reloc
1724 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1725 int64_t new_offset
0badd99f
YQ
1726 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1727
1728 if (can_encode_int32 (new_offset, 21))
1729 {
1730 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1731 new_offset);
bb903df0 1732 }
0badd99f 1733 else if (can_encode_int32 (new_offset, 28))
bb903df0 1734 {
0badd99f
YQ
1735 /* The offset is out of range for a conditional branch
1736 instruction but not for a unconditional branch. We can use
1737 the following instructions instead:
bb903df0 1738
0badd99f
YQ
1739 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1740 B NOT_TAKEN ; Else jump over TAKEN and continue.
1741 TAKEN:
1742 B #(offset - 8)
1743 NOT_TAKEN:
1744
1745 */
1746
1747 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1748 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1749 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1750 }
0badd99f 1751}
bb903df0 1752
0badd99f
YQ
1753/* Implementation of aarch64_insn_visitor method "cb". */
1754
1755static void
1756aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1757 const unsigned rn, int is64,
1758 struct aarch64_insn_data *data)
1759{
1760 struct aarch64_insn_relocation_data *insn_reloc
1761 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1762 int64_t new_offset
0badd99f
YQ
1763 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1764
1765 if (can_encode_int32 (new_offset, 21))
1766 {
1767 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1768 aarch64_register (rn, is64), new_offset);
bb903df0 1769 }
0badd99f 1770 else if (can_encode_int32 (new_offset, 28))
bb903df0 1771 {
0badd99f
YQ
1772 /* The offset is out of range for a compare and branch
1773 instruction but not for a unconditional branch. We can use
1774 the following instructions instead:
1775
1776 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1777 B NOT_TAKEN ; Else jump over TAKEN and continue.
1778 TAKEN:
1779 B #(offset - 8)
1780 NOT_TAKEN:
1781
1782 */
1783 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1784 aarch64_register (rn, is64), 8);
1785 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1786 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1787 }
1788}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN           ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
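
/* The bit widths tested above come from the branch encodings: TBZ/TBNZ
   carry a 14-bit instruction offset (a 16-bit byte offset, +/-32 KiB),
   B.COND and CBZ/CBNZ a 19-bit instruction offset (21-bit byte offset,
   +/-1 MiB), and plain B a 26-bit instruction offset (28-bit byte
   offset, +/-128 MiB), which is why every fallback path checks
   can_encode_int32 (new_offset, 28).  */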

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}
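
/* For illustration: with a hypothetical target address of 0x4007b4,
   the ADRP case materializes the 4 KiB page base 0x400000
   (address & ~0xfff), matching what the original ADRP would have
   computed; the low 12 bits are supplied by a following ADD or
   load/store, which is not PC-relative and is re-emitted unchanged by
   the "others" visitor below.  */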

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}
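
/* Note that RT deliberately serves both as the address register and as
   the destination of the load: the MOV/MOVK sequence materializes the
   literal's address in RT, and the load then overwrites RT with the
   literal's value, leaving every other register untouched.  */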

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
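
/* The initializers above must stay in the same order as the fields of
   struct aarch64_insn_visitor: b, b_cond, cb, tb, adr, ldr_literal,
   others.  */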

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      .  32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      .  31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          |  5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

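  /* For illustration only (hypothetical layout, nothing below uses it):
     read from low to high addresses, the frame built here corresponds
     roughly to

       struct jump_pad_frame
       {
         uint64_t tpoint;        // collecting_t, one 16-byte cell
         uint64_t tpidr_el0;
         uint64_t fpcr[2];       // 5 special-purpose cells ...
         uint64_t fpsr[2];
         uint64_t cpsr[2];
         uint64_t pc[2];
         uint64_t sp[2];
         uint64_t x[31][2];      // x0 .. x30
         uint64_t q[32][2];      // q0 .. q31, one 128-bit value per cell
       };

     where each [2] pads a 64-bit register out to one 16-byte cell.  */
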
  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

     */

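  /* For illustration (hypothetical C, not part of the emitted code):
     the LDAXR/STXR loop below behaves roughly like

       uintptr_t expected = 0;
       while (!atomic_compare_exchange_weak_explicit
                (&lock, &expected, (uintptr_t) &collecting,
                 memory_order_acq_rel, memory_order_relaxed))
         expected = 0;   // Held by another thread; retry.

     except that SEVL/WFE lets the core sleep between attempts instead
     of spinning hot.  */
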
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
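
  /* Both CBNZ offsets are relative to the branch instruction itself:
     -2 * 4 and -4 * 4 each resolve to the WFE above, i.e. the "again"
     label in the pseudo-code.  */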

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are done
     ; before this instruction is executed.  Furthermore, this instruction
     ; will trigger an event, letting other threads know they can grab
     ; the lock.
     STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
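
/* To summarize, the finished jump pad contains: the state save, the
   lock acquisition, the collector call, the unlock and the state
   restore emitted above; then the relocated copy of the instruction
   that was at TPADDR; and finally a branch back to just past the
   original instruction.  The caller receives, in JJUMP_PAD_INSN, the
   single B instruction it will patch over the tracepoint address
   itself.  */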

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
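
/* Like the jump pad above, the compiled-expression stack uses one
   16-byte cell per value: AArch64 requires SP to be 16-byte aligned
   whenever it is used as the base register of a memory access, so
   pushing and popping whole cells keeps every access legal.  */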

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
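
/* Note how the prologue lays out the frame: the STP allocates two
   16-byte cells and stores x0 at [sp] and x1 at [sp, #8]; FP and LR
   then land at offsets 2 * 8 and 3 * 8, and FP is pointed at the saved
   FP cell.  That is why the epilogue below finds value at [fp - 8] and
   aarch64_emit_reg finds regs at [fp - 16].  */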

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
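
/* The offset/size pair recorded by the goto emitters tells the bytecode
   compiler where the placeholder NOP sits; once the branch target is
   known, it calls aarch64_write_goto_address below, which overwrites
   the NOP with a real B instruction.  */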

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
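
/* Every AArch64 instruction is 4 bytes wide, so a single B instruction
   into the jump pad fits exactly over any instruction; hence the
   minimum fast tracepoint instruction length is 4.  */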

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
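
/* For a 64-bit process the kind is effectively ignored: there is a
   single breakpoint instruction, always 4 bytes.  32-bit processes are
   delegated to the aarch32 code, where the kind distinguishes the
   ARM and Thumb encodings and hence the breakpoint length.  */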

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}