/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch
          && addr_trap < addr_watch + len)
        return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

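/* These values follow the standard A64 condition-code encoding used
   by B.cond and CSINC: EQ tests Z == 1, NE tests Z == 0, LO tests
   C == 0 (unsigned lower), and GE/LT/GT/LE are the usual signed
   comparisons.  Only the codes this file actually emits are listed,
   which is why the enumeration is sparse.  */
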
enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

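/* The emit_* routines below assemble instruction words with the ENCODE
   macro from arch/aarch64-insn.h, which (as defined there) masks a
   value to a given bit width and shifts it into place:

     ENCODE (val, size, offset) == (val & ((1ULL << size) - 1)) << offset

   For instance, in emit_blr below, ENCODE (rn.num, 5, 5) places the
   5-bit register number into bits [9:5] of the instruction, which is
   the Rn field of BLR.  */
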
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

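/* Taken together, these exclusive-access primitives are enough to
   build a spin lock.  An illustrative acquire/release sketch only
   (the jump pad built by aarch64_install_fast_tracepoint_jump_pad
   further down emits a sequence along these lines, with the lock
   address in x1 and this thread's marker in x2):

     again:
       LDAXR x3, [x1]       ; load-acquire the current lock word
       CBNZ  x3, again      ; non-zero means another thread holds it
       STXR  w3, x2, [x1]   ; try to claim it with our marker
       CBNZ  w3, again      ; w3 != 0 means the store-exclusive failed
       ...
       STLR  xzr, [x1]      ; store-release zero to unlock  */
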
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

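/* For instance, for the (hypothetical) address 0x0000ffff12345678,
   emit_mov_addr would emit:

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16
     MOVK xd, #0xffff, lsl #32

   and stop there, since bits [63:48] of the address are zero; the
   initial MOV already cleared the top bits of the register.  */
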
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction places rn in rd if the condition is true, and
   rm + 1 in rd otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

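/* As a concrete example, emit_cset (p, x0, EQ) assembles
   CSINC x0, xzr, xzr, NE (since EQ ^ 0x1 == NE): when NE is false,
   i.e. the Z flag is set, x0 receives xzr + 1 == 1; otherwise it
   receives xzr == 0.  That is exactly CSET x0, EQ.  */
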
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation during fast tracepoint insertion.  The
   visitor can relocate an instruction from BASE.INSN_ADDR to NEW_ADDR
   and saves the relocated instructions in the buffer pointed to by
   INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN           ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

bb903df0
PL
1765/* Implementation of linux_target_ops method
1766 "install_fast_tracepoint_jump_pad". */
1767
1768static int
1769aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1770 CORE_ADDR tpaddr,
1771 CORE_ADDR collector,
1772 CORE_ADDR lockaddr,
1773 ULONGEST orig_size,
1774 CORE_ADDR *jump_entry,
1775 CORE_ADDR *trampoline,
1776 ULONGEST *trampoline_size,
1777 unsigned char *jjump_pad_insn,
1778 ULONGEST *jjump_pad_insn_size,
1779 CORE_ADDR *adjusted_insn_addr,
1780 CORE_ADDR *adjusted_insn_addr_end,
1781 char *err)
1782{
1783 uint32_t buf[256];
1784 uint32_t *p = buf;
1785 int32_t offset;
1786 int i;
70b439f0 1787 uint32_t insn;
bb903df0 1788 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1789 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1790
1791 /* We need to save the current state on the stack both to restore it
1792 later and to collect register values when the tracepoint is hit.
1793
1794 The saved registers are pushed in a layout that needs to be in sync
1795 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1796 the supply_fast_tracepoint_registers function will fill in the
1797 register cache from a pointer to saved registers on the stack we build
1798 here.
1799
1800 For simplicity, we set the size of each cell on the stack to 16 bytes.
1801 This way one cell can hold any register type, from system registers
1802 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1803 has to be 16 bytes aligned anyway.
1804
1805 Note that the CPSR register does not exist on AArch64. Instead we
1806 can access system bits describing the process state with the
1807 MRS/MSR instructions, namely the condition flags. We save them as
1808 if they are part of a CPSR register because that's how GDB
1809 interprets these system bits. At the moment, only the condition
1810 flags are saved in CPSR (NZCV).
1811
1812 Stack layout, each cell is 16 bytes (descending):
1813
1814 High *-------- SIMD&FP registers from 31 down to 0. --------*
1815 | q31 |
1816 . .
1817 . . 32 cells
1818 . .
1819 | q0 |
1820 *---- General purpose registers from 30 down to 0. ----*
1821 | x30 |
1822 . .
1823 . . 31 cells
1824 . .
1825 | x0 |
1826 *------------- Special purpose registers. -------------*
1827 | SP |
1828 | PC |
1829 | CPSR (NZCV) | 5 cells
1830 | FPSR |
1831 | FPCR | <- SP + 16
1832 *------------- collecting_t object --------------------*
1833 | TPIDR_EL0 | struct tracepoint * |
1834 Low *------------------------------------------------------*
1835
1836 After this stack is set up, we issue a call to the collector, passing
1837 it the saved registers at (SP + 16). */
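/* Editorial note on the sizes involved (illustrative arithmetic, not
   from the original source): the frame built below occupies
   (32 + 31 + 5) * 16 = 1088 bytes of register cells plus 16 bytes for
   the collecting_t object, 1104 bytes in total.  The register block
   handed to the collector therefore starts at SP + 16, right above
   the collecting_t object.  */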
1838
1839 /* Push SIMD&FP registers on the stack:
1840
1841 SUB sp, sp, #(32 * 16)
1842
1843 STP q30, q31, [sp, #(30 * 16)]
1844 ...
1845 STP q0, q1, [sp]
1846
1847 */
1848 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1849 for (i = 30; i >= 0; i -= 2)
1850 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1851
1852 /* Push general purpose registers on the stack. Note that we do not need
1853 to push x31 as it represents the xzr register and not the stack
1854 pointer in a STR instruction.
1855
1856 SUB sp, sp, #(31 * 16)
1857
1858 STR x30, [sp, #(30 * 16)]
1859 ...
1860 STR x0, [sp]
1861
1862 */
1863 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1864 for (i = 30; i >= 0; i -= 1)
1865 p += emit_str (p, aarch64_register (i, 1), sp,
1866 offset_memory_operand (i * 16));
1867
1868 /* Make space for 5 more cells.
1869
1870 SUB sp, sp, #(5 * 16)
1871
1872 */
1873 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1874
1875
1876 /* Save SP:
1877
1878 ADD x4, sp, #((32 + 31 + 5) * 16)
1879 STR x4, [sp, #(4 * 16)]
1880
1881 */
1882 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1883 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1884
1885 /* Save PC (tracepoint address):
1886
1887 MOV x3, #(tpaddr)
1888 ...
1889
1890 STR x3, [sp, #(3 * 16)]
1891
1892 */
1893
1894 p += emit_mov_addr (p, x3, tpaddr);
1895 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1896
1897 /* Save CPSR (NZCV), FPSR and FPCR:
1898
1899 MRS x2, nzcv
1900 MRS x1, fpsr
1901 MRS x0, fpcr
1902
1903 STR x2, [sp, #(2 * 16)]
1904 STR x1, [sp, #(1 * 16)]
1905 STR x0, [sp, #(0 * 16)]
1906
1907 */
1908 p += emit_mrs (p, x2, NZCV);
1909 p += emit_mrs (p, x1, FPSR);
1910 p += emit_mrs (p, x0, FPCR);
1911 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1912 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1913 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1914
1915 /* Push the collecting_t object. It consists of the address of the
1916 tracepoint and an ID for the current thread. We get the latter by
1917 reading the tpidr_el0 system register. It corresponds to the
1918 NT_ARM_TLS register accessible with ptrace.
1919
1920 MOV x0, #(tpoint)
1921 ...
1922
1923 MRS x1, tpidr_el0
1924
1925 STP x0, x1, [sp, #-16]!
1926
1927 */
1928
1929 p += emit_mov_addr (p, x0, tpoint);
1930 p += emit_mrs (p, x1, TPIDR_EL0);
1931 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1932
1933 /* Spin-lock:
1934
1935 The shared memory for the lock is at lockaddr. It will hold zero
1936 if no-one is holding the lock, otherwise it contains the address of
1937 the collecting_t object on the stack of the thread which acquired it.
1938
1939 At this stage, the stack pointer points to this thread's collecting_t
1940 object.
1941
1942 We use the following registers:
1943 - x0: Address of the lock.
1944 - x1: Pointer to collecting_t object.
1945 - x2: Scratch register.
1946
1947 MOV x0, #(lockaddr)
1948 ...
1949 MOV x1, sp
1950
1951 ; Trigger an event local to this core, so that the first WFE
1952 ; below returns immediately instead of blocking.
1953 SEVL
1954 again:
1955 ; Wait for an event. The event is triggered by either the SEVL
1956 ; or STLR instructions (store release).
1957 WFE
1958
1959 ; Atomically read at lockaddr, marking the memory location as
1960 ; exclusive. This instruction also has acquire semantics:
1961 ; memory accesses that follow it cannot be reordered ahead
1962 ; of it.
1963 LDAXR x2, [x0]
1964
1965 ; Try again if another thread holds the lock.
1966 CBNZ x2, again
1967
1968 ; We can lock it! Write the address of the collecting_t object.
1969 ; This instruction will fail if the memory location is not marked
1970 ; as exclusive anymore. If it succeeds, it will remove the
1971 ; exclusive mark on the memory location. This way, if another
1972 ; thread executes this instruction before us, we will fail and try
1973 ; all over again.
1974 STXR w2, x1, [x0]
1975 CBNZ w2, again
1976
1977 */
1978
1979 p += emit_mov_addr (p, x0, lockaddr);
1980 p += emit_mov (p, x1, register_operand (sp));
1981
1982 p += emit_sevl (p);
1983 p += emit_wfe (p);
1984 p += emit_ldaxr (p, x2, x0);
1985 p += emit_cb (p, 1, x2, -2 * 4);
1986 p += emit_stxr (p, w2, x1, x0);
1987 p += emit_cb (p, 1, w2, -4 * 4);
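/* In C11 terms, the LDAXR/STXR loop emitted above behaves roughly like
   the following hypothetical sketch (for exposition only; the names
   are made up):

     uintptr_t expected = 0;
     while (!atomic_compare_exchange_weak (&lock, &expected, me))
       {
         expected = 0;   -- lock held by someone else; WFE sleeps
       }                 -- until the holder's STLR triggers an event

   with the difference that the exclusive monitor makes the store fail
   if any other observer touched the location in between.  */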
1988
1989 /* Call collector (struct tracepoint *, unsigned char *):
1990
1991 MOV x0, #(tpoint)
1992 ...
1993
1994 ; Saved registers start after the collecting_t object.
1995 ADD x1, sp, #16
1996
1997 ; We use an intra-procedure-call scratch register.
1998 MOV ip0, #(collector)
1999 ...
2000
2001 ; And call back to C!
2002 BLR ip0
2003
2004 */
2005
2006 p += emit_mov_addr (p, x0, tpoint);
2007 p += emit_add (p, x1, sp, immediate_operand (16));
2008
2009 p += emit_mov_addr (p, ip0, collector);
2010 p += emit_blr (p, ip0);
2011
2012 /* Release the lock.
2013
2014 MOV x0, #(lockaddr)
2015 ...
2016
2017 ; This instruction is a normal store with memory ordering
2018 ; constraints. Thanks to this we do not have to put a data
2019 ; barrier instruction to make sure all data reads and writes are done
2020 ; before this instruction is executed. Furthermore, this instruction
2021 ; will trigger an event, letting other threads know they can grab
2022 ; the lock.
2023 STLR xzr, [x0]
2024
2025 */
2026 p += emit_mov_addr (p, x0, lockaddr);
2027 p += emit_stlr (p, xzr, x0);
2028
2029 /* Free collecting_t object:
2030
2031 ADD sp, sp, #16
2032
2033 */
2034 p += emit_add (p, sp, sp, immediate_operand (16));
2035
2036 /* Restore CPSR (NZCV), FPSR and FPCR, and free all special purpose
2037 registers from the stack.
2038
2039 LDR x2, [sp, #(2 * 16)]
2040 LDR x1, [sp, #(1 * 16)]
2041 LDR x0, [sp, #(0 * 16)]
2042
2043 MSR NZCV, x2
2044 MSR FPSR, x1
2045 MSR FPCR, x0
2046
2047 ADD sp, sp, #(5 * 16)
2048
2049 */
2050 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2051 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2052 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2053 p += emit_msr (p, NZCV, x2);
2054 p += emit_msr (p, FPSR, x1);
2055 p += emit_msr (p, FPCR, x0);
2056
2057 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2058
2059 /* Pop general purpose registers:
2060
2061 LDR x0, [sp]
2062 ...
2063 LDR x30, [sp, #(30 * 16)]
2064
2065 ADD sp, sp, #(31 * 16)
2066
2067 */
2068 for (i = 0; i <= 30; i += 1)
2069 p += emit_ldr (p, aarch64_register (i, 1), sp,
2070 offset_memory_operand (i * 16));
2071 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2072
2073 /* Pop SIMD&FP registers:
2074
2075 LDP q0, q1, [sp]
2076 ...
2077 LDP q30, q31, [sp, #(30 * 16)]
2078
2079 ADD sp, sp, #(32 * 16)
2080
2081 */
2082 for (i = 0; i <= 30; i += 2)
2083 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2084 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2085
2086 /* Write the code into the inferior memory. */
2087 append_insns (&buildaddr, p - buf, buf);
2088
2089 /* Now emit the relocated instruction. */
2090 *adjusted_insn_addr = buildaddr;
70b439f0 2091 target_read_uint32 (tpaddr, &insn);
2092
2093 insn_data.base.insn_addr = tpaddr;
2094 insn_data.new_addr = buildaddr;
2095 insn_data.insn_ptr = buf;
2096
2097 aarch64_relocate_instruction (insn, &visitor,
2098 (struct aarch64_insn_data *) &insn_data);
2099
bb903df0 2100 /* We may not have been able to relocate the instruction. */
0badd99f 2101 if (insn_data.insn_ptr == buf)
2102 {
2103 sprintf (err,
2104 "E.Could not relocate instruction from %s to %s.",
2105 core_addr_to_string_nz (tpaddr),
2106 core_addr_to_string_nz (buildaddr));
2107 return 1;
2108 }
dfaffe9d 2109 else
0badd99f 2110 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2111 *adjusted_insn_addr_end = buildaddr;
2112
2113 /* Go back to the start of the buffer. */
2114 p = buf;
2115
2116 /* Emit a branch back from the jump pad. */
2117 offset = (tpaddr + orig_size - buildaddr);
2118 if (!can_encode_int32 (offset, 28))
2119 {
2120 sprintf (err,
2121 "E.Jump back from jump pad too far from tracepoint "
2122 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2123 offset);
2124 return 1;
2125 }
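/* Editorial note: the AArch64 B instruction encodes a signed 26-bit
   word offset; after the implicit shift by 2 this is a 28-bit byte
   offset, i.e. a reach of +/-128 MiB around the branch.  That is the
   limit can_encode_int32 (offset, 28) enforces here and for the
   branch into the jump pad below.  */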
2126
2127 p += emit_b (p, 0, offset);
2128 append_insns (&buildaddr, p - buf, buf);
2129
2130 /* Give the caller a branch instruction into the jump pad. */
2131 offset = (*jump_entry - tpaddr);
2132 if (!can_encode_int32 (offset, 28))
2133 {
2134 sprintf (err,
2135 "E.Jump pad too far from tracepoint "
2136 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2137 offset);
2138 return 1;
2139 }
2140
2141 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2142 *jjump_pad_insn_size = 4;
2143
2144 /* Return the end address of our pad. */
2145 *jump_entry = buildaddr;
2146
2147 return 0;
2148}
2149
2150/* Helper function writing LEN instructions from START into
2151 current_insn_ptr. */
2152
2153static void
2154emit_ops_insns (const uint32_t *start, int len)
2155{
2156 CORE_ADDR buildaddr = current_insn_ptr;
2157
2158 if (debug_threads)
2159 debug_printf ("Adding %d instrucions at %s\n",
2160 len, paddress (buildaddr));
2161
2162 append_insns (&buildaddr, len, start);
2163 current_insn_ptr = buildaddr;
2164}
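/* All of the aarch64_emit_* methods below follow the same pattern:
   assemble a few instructions into a local buffer, then flush them to
   the inferior through emit_ops_insns.  Schematically:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_something (p, ...);   -- append one 4-byte instruction
     emit_ops_insns (buf, p - buf);  -- write them at current_insn_ptr
*/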
2165
2166/* Pop a register from the stack. */
2167
2168static int
2169emit_pop (uint32_t *buf, struct aarch64_register rt)
2170{
2171 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2172}
2173
2174/* Push a register on the stack. */
2175
2176static int
2177emit_push (uint32_t *buf, struct aarch64_register rt)
2178{
2179 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2180}
2181
2182/* Implementation of emit_ops method "emit_prologue". */
2183
2184static void
2185aarch64_emit_prologue (void)
2186{
2187 uint32_t buf[16];
2188 uint32_t *p = buf;
2189
2190 /* This function emits a prologue for the following function prototype:
2191
2192 enum eval_result_type f (unsigned char *regs,
2193 ULONGEST *value);
2194
2195 The first argument is a buffer of raw registers. The second
2196 argument is a pointer through which the result of evaluating the
2197 expression is returned; it is set to whatever is on top of the
2198 stack at the end.
2199
2200 The stack set up by the prologue is as such:
2201
2202 High *------------------------------------------------------*
2203 | LR |
2204 | FP | <- FP
2205 | x1 (ULONGEST *value) |
2206 | x0 (unsigned char *regs) |
2207 Low *------------------------------------------------------*
2208
2209 As we are implementing a stack machine, each opcode can expand the
2210 stack so we never know how far we are from the data saved by this
2211 prologue. In order to be able to refer to value and regs later, we save
2212 the current stack pointer in the frame pointer. This way, it is not
2213 clobbered when calling C functions.
2214
2215 Finally, throughout every operation, we are using register x0 as the
2216 top of the stack, and x1 as a scratch register. */
2217
2218 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2219 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2220 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2221
2222 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2223
2224
2225 emit_ops_insns (buf, p - buf);
2226}
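/* Editorial note on the offsets implied by this prologue: with FP
   pointing at the saved FP cell, the saved x1 (ULONGEST *value) sits
   at FP - 8 and the saved x0 (unsigned char *regs) at FP - 16.  The
   epilogue's "SUB x1, fp, #8" and aarch64_emit_reg's
   "SUB x0, fp, #16" below rely on exactly this layout.  */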
2227
2228/* Implementation of emit_ops method "emit_epilogue". */
2229
2230static void
2231aarch64_emit_epilogue (void)
2232{
2233 uint32_t buf[16];
2234 uint32_t *p = buf;
2235
2236 /* Store the result of the expression (x0) in *value. */
2237 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2238 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2239 p += emit_str (p, x0, x1, offset_memory_operand (0));
2240
2241 /* Restore the previous state. */
2242 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2243 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2244
2245 /* Return expr_eval_no_error. */
2246 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2247 p += emit_ret (p, lr);
2248
2249 emit_ops_insns (buf, p - buf);
2250}
2251
2252/* Implementation of emit_ops method "emit_add". */
2253
2254static void
2255aarch64_emit_add (void)
2256{
2257 uint32_t buf[16];
2258 uint32_t *p = buf;
2259
2260 p += emit_pop (p, x1);
45e3745e 2261 p += emit_add (p, x0, x1, register_operand (x0));
2262
2263 emit_ops_insns (buf, p - buf);
2264}
2265
2266/* Implementation of emit_ops method "emit_sub". */
2267
2268static void
2269aarch64_emit_sub (void)
2270{
2271 uint32_t buf[16];
2272 uint32_t *p = buf;
2273
2274 p += emit_pop (p, x1);
45e3745e 2275 p += emit_sub (p, x0, x1, register_operand (x0));
2276
2277 emit_ops_insns (buf, p - buf);
2278}
2279
2280/* Implementation of emit_ops method "emit_mul". */
2281
2282static void
2283aarch64_emit_mul (void)
2284{
2285 uint32_t buf[16];
2286 uint32_t *p = buf;
2287
2288 p += emit_pop (p, x1);
2289 p += emit_mul (p, x0, x1, x0);
2290
2291 emit_ops_insns (buf, p - buf);
2292}
2293
2294/* Implementation of emit_ops method "emit_lsh". */
2295
2296static void
2297aarch64_emit_lsh (void)
2298{
2299 uint32_t buf[16];
2300 uint32_t *p = buf;
2301
2302 p += emit_pop (p, x1);
2303 p += emit_lslv (p, x0, x1, x0);
2304
2305 emit_ops_insns (buf, p - buf);
2306}
2307
2308/* Implementation of emit_ops method "emit_rsh_signed". */
2309
2310static void
2311aarch64_emit_rsh_signed (void)
2312{
2313 uint32_t buf[16];
2314 uint32_t *p = buf;
2315
2316 p += emit_pop (p, x1);
2317 p += emit_asrv (p, x0, x1, x0);
2318
2319 emit_ops_insns (buf, p - buf);
2320}
2321
2322/* Implementation of emit_ops method "emit_rsh_unsigned". */
2323
2324static void
2325aarch64_emit_rsh_unsigned (void)
2326{
2327 uint32_t buf[16];
2328 uint32_t *p = buf;
2329
2330 p += emit_pop (p, x1);
2331 p += emit_lsrv (p, x0, x1, x0);
2332
2333 emit_ops_insns (buf, p - buf);
2334}
2335
2336/* Implementation of emit_ops method "emit_ext". */
2337
2338static void
2339aarch64_emit_ext (int arg)
2340{
2341 uint32_t buf[16];
2342 uint32_t *p = buf;
2343
2344 p += emit_sbfx (p, x0, x0, 0, arg);
2345
2346 emit_ops_insns (buf, p - buf);
2347}
2348
2349/* Implementation of emit_ops method "emit_log_not". */
2350
2351static void
2352aarch64_emit_log_not (void)
2353{
2354 uint32_t buf[16];
2355 uint32_t *p = buf;
2356
2357 /* If the top of the stack is 0, replace it with 1. Else replace it with
2358 0. */
2359
2360 p += emit_cmp (p, x0, immediate_operand (0));
2361 p += emit_cset (p, x0, EQ);
2362
2363 emit_ops_insns (buf, p - buf);
2364}
2365
2366/* Implementation of emit_ops method "emit_bit_and". */
2367
2368static void
2369aarch64_emit_bit_and (void)
2370{
2371 uint32_t buf[16];
2372 uint32_t *p = buf;
2373
2374 p += emit_pop (p, x1);
2375 p += emit_and (p, x0, x0, x1);
2376
2377 emit_ops_insns (buf, p - buf);
2378}
2379
2380/* Implementation of emit_ops method "emit_bit_or". */
2381
2382static void
2383aarch64_emit_bit_or (void)
2384{
2385 uint32_t buf[16];
2386 uint32_t *p = buf;
2387
2388 p += emit_pop (p, x1);
2389 p += emit_orr (p, x0, x0, x1);
2390
2391 emit_ops_insns (buf, p - buf);
2392}
2393
2394/* Implementation of emit_ops method "emit_bit_xor". */
2395
2396static void
2397aarch64_emit_bit_xor (void)
2398{
2399 uint32_t buf[16];
2400 uint32_t *p = buf;
2401
2402 p += emit_pop (p, x1);
2403 p += emit_eor (p, x0, x0, x1);
2404
2405 emit_ops_insns (buf, p - buf);
2406}
2407
2408/* Implementation of emit_ops method "emit_bit_not". */
2409
2410static void
2411aarch64_emit_bit_not (void)
2412{
2413 uint32_t buf[16];
2414 uint32_t *p = buf;
2415
2416 p += emit_mvn (p, x0, x0);
2417
2418 emit_ops_insns (buf, p - buf);
2419}
2420
2421/* Implementation of emit_ops method "emit_equal". */
2422
2423static void
2424aarch64_emit_equal (void)
2425{
2426 uint32_t buf[16];
2427 uint32_t *p = buf;
2428
2429 p += emit_pop (p, x1);
2430 p += emit_cmp (p, x0, register_operand (x1));
2431 p += emit_cset (p, x0, EQ);
2432
2433 emit_ops_insns (buf, p - buf);
2434}
2435
2436/* Implementation of emit_ops method "emit_less_signed". */
2437
2438static void
2439aarch64_emit_less_signed (void)
2440{
2441 uint32_t buf[16];
2442 uint32_t *p = buf;
2443
2444 p += emit_pop (p, x1);
2445 p += emit_cmp (p, x1, register_operand (x0));
2446 p += emit_cset (p, x0, LT);
2447
2448 emit_ops_insns (buf, p - buf);
2449}
2450
2451/* Implementation of emit_ops method "emit_less_unsigned". */
2452
2453static void
2454aarch64_emit_less_unsigned (void)
2455{
2456 uint32_t buf[16];
2457 uint32_t *p = buf;
2458
2459 p += emit_pop (p, x1);
2460 p += emit_cmp (p, x1, register_operand (x0));
2461 p += emit_cset (p, x0, LO);
2462
2463 emit_ops_insns (buf, p - buf);
2464}
2465
2466/* Implementation of emit_ops method "emit_ref". */
2467
2468static void
2469aarch64_emit_ref (int size)
2470{
2471 uint32_t buf[16];
2472 uint32_t *p = buf;
2473
2474 switch (size)
2475 {
2476 case 1:
2477 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2478 break;
2479 case 2:
2480 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2481 break;
2482 case 4:
2483 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2484 break;
2485 case 8:
2486 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2487 break;
2488 default:
2489 /* Unknown size, bail on compilation. */
2490 emit_error = 1;
2491 break;
2492 }
2493
2494 emit_ops_insns (buf, p - buf);
2495}
2496
2497/* Implementation of emit_ops method "emit_if_goto". */
2498
2499static void
2500aarch64_emit_if_goto (int *offset_p, int *size_p)
2501{
2502 uint32_t buf[16];
2503 uint32_t *p = buf;
2504
2505 /* The Z flag is set or cleared here. */
2506 p += emit_cmp (p, x0, immediate_operand (0));
2507 /* This instruction must not change the Z flag. */
2508 p += emit_pop (p, x0);
2509 /* Branch over the next instruction if x0 == 0. */
2510 p += emit_bcond (p, EQ, 8);
2511
2512 /* The NOP instruction will be patched with an unconditional branch. */
2513 if (offset_p)
2514 *offset_p = (p - buf) * 4;
2515 if (size_p)
2516 *size_p = 4;
2517 p += emit_nop (p);
2518
2519 emit_ops_insns (buf, p - buf);
2520}
2521
2522/* Implementation of emit_ops method "emit_goto". */
2523
2524static void
2525aarch64_emit_goto (int *offset_p, int *size_p)
2526{
2527 uint32_t buf[16];
2528 uint32_t *p = buf;
2529
2530 /* The NOP instruction will be patched with an unconditional branch. */
2531 if (offset_p)
2532 *offset_p = 0;
2533 if (size_p)
2534 *size_p = 4;
2535 p += emit_nop (p);
2536
2537 emit_ops_insns (buf, p - buf);
2538}
2539
2540/* Implementation of emit_ops method "write_goto_address". */
2541
2542 static void
2543aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2544{
2545 uint32_t insn;
2546
2547 emit_b (&insn, 0, to - from);
2548 append_insns (&from, 1, &insn);
2549}
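/* A hypothetical caller, for illustration of how the goto methods and
   write_goto_address fit together (names invented): the bytecode
   compiler records where each NOP placeholder was emitted, then
   patches it once the branch target is known:

     int offset, size;
     CORE_ADDR start = current_insn_ptr;

     aarch64_emit_if_goto (&offset, &size);  -- leaves a NOP at
                                             -- start + offset
     ... emit the instructions to branch over ...
     aarch64_write_goto_address (start + offset, target, size);
*/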
2550
2551/* Implementation of emit_ops method "emit_const". */
2552
2553static void
2554aarch64_emit_const (LONGEST num)
2555{
2556 uint32_t buf[16];
2557 uint32_t *p = buf;
2558
2559 p += emit_mov_addr (p, x0, num);
2560
2561 emit_ops_insns (buf, p - buf);
2562}
2563
2564/* Implementation of emit_ops method "emit_call". */
2565
2566static void
2567aarch64_emit_call (CORE_ADDR fn)
2568{
2569 uint32_t buf[16];
2570 uint32_t *p = buf;
2571
2572 p += emit_mov_addr (p, ip0, fn);
2573 p += emit_blr (p, ip0);
2574
2575 emit_ops_insns (buf, p - buf);
2576}
2577
2578/* Implementation of emit_ops method "emit_reg". */
2579
2580static void
2581aarch64_emit_reg (int reg)
2582{
2583 uint32_t buf[16];
2584 uint32_t *p = buf;
2585
2586 /* Set x0 to unsigned char *regs. */
2587 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2588 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2589 p += emit_mov (p, x1, immediate_operand (reg));
2590
2591 emit_ops_insns (buf, p - buf);
2592
2593 aarch64_emit_call (get_raw_reg_func_addr ());
2594}
2595
2596/* Implementation of emit_ops method "emit_pop". */
2597
2598static void
2599aarch64_emit_pop (void)
2600{
2601 uint32_t buf[16];
2602 uint32_t *p = buf;
2603
2604 p += emit_pop (p, x0);
2605
2606 emit_ops_insns (buf, p - buf);
2607}
2608
2609/* Implementation of emit_ops method "emit_stack_flush". */
2610
2611static void
2612aarch64_emit_stack_flush (void)
2613{
2614 uint32_t buf[16];
2615 uint32_t *p = buf;
2616
2617 p += emit_push (p, x0);
2618
2619 emit_ops_insns (buf, p - buf);
2620}
2621
2622/* Implementation of emit_ops method "emit_zero_ext". */
2623
2624static void
2625aarch64_emit_zero_ext (int arg)
2626{
2627 uint32_t buf[16];
2628 uint32_t *p = buf;
2629
2630 p += emit_ubfx (p, x0, x0, 0, arg);
2631
2632 emit_ops_insns (buf, p - buf);
2633}
2634
2635/* Implementation of emit_ops method "emit_swap". */
2636
2637static void
2638aarch64_emit_swap (void)
2639{
2640 uint32_t buf[16];
2641 uint32_t *p = buf;
2642
2643 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2644 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2645 p += emit_mov (p, x0, register_operand (x1));
2646
2647 emit_ops_insns (buf, p - buf);
2648}
2649
2650/* Implementation of emit_ops method "emit_stack_adjust". */
2651
2652static void
2653aarch64_emit_stack_adjust (int n)
2654{
2655 /* This is not needed with our design. */
2656 uint32_t buf[16];
2657 uint32_t *p = buf;
2658
2659 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2660
2661 emit_ops_insns (buf, p - buf);
2662}
2663
2664/* Implementation of emit_ops method "emit_int_call_1". */
2665
2666static void
2667aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2668{
2669 uint32_t buf[16];
2670 uint32_t *p = buf;
2671
2672 p += emit_mov (p, x0, immediate_operand (arg1));
2673
2674 emit_ops_insns (buf, p - buf);
2675
2676 aarch64_emit_call (fn);
2677}
2678
2679/* Implementation of emit_ops method "emit_void_call_2". */
2680
2681static void
2682aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2683{
2684 uint32_t buf[16];
2685 uint32_t *p = buf;
2686
2687 /* Push x0 on the stack. */
2688 aarch64_emit_stack_flush ();
2689
2690 /* Set up arguments for the function call:
2691
2692 x0: arg1
2693 x1: top of the stack
2694
2695 MOV x1, x0
2696 MOV x0, #arg1 */
2697
2698 p += emit_mov (p, x1, register_operand (x0));
2699 p += emit_mov (p, x0, immediate_operand (arg1));
2700
2701 emit_ops_insns (buf, p - buf);
2702
2703 aarch64_emit_call (fn);
2704
2705 /* Restore x0. */
2706 aarch64_emit_pop ();
2707}
2708
2709/* Implementation of emit_ops method "emit_eq_goto". */
2710
2711static void
2712aarch64_emit_eq_goto (int *offset_p, int *size_p)
2713{
2714 uint32_t buf[16];
2715 uint32_t *p = buf;
2716
2717 p += emit_pop (p, x1);
2718 p += emit_cmp (p, x1, register_operand (x0));
2719 /* Branch over the next instruction if x0 != x1. */
2720 p += emit_bcond (p, NE, 8);
2721 /* The NOP instruction will be patched with an unconditional branch. */
2722 if (offset_p)
2723 *offset_p = (p - buf) * 4;
2724 if (size_p)
2725 *size_p = 4;
2726 p += emit_nop (p);
2727
2728 emit_ops_insns (buf, p - buf);
2729}
2730
2731/* Implementation of emit_ops method "emit_ne_goto". */
2732
2733static void
2734aarch64_emit_ne_goto (int *offset_p, int *size_p)
2735{
2736 uint32_t buf[16];
2737 uint32_t *p = buf;
2738
2739 p += emit_pop (p, x1);
2740 p += emit_cmp (p, x1, register_operand (x0));
2741 /* Branch over the next instruction if x0 == x1. */
2742 p += emit_bcond (p, EQ, 8);
2743 /* The NOP instruction will be patched with an unconditional branch. */
2744 if (offset_p)
2745 *offset_p = (p - buf) * 4;
2746 if (size_p)
2747 *size_p = 4;
2748 p += emit_nop (p);
2749
2750 emit_ops_insns (buf, p - buf);
2751}
2752
2753/* Implementation of emit_ops method "emit_lt_goto". */
2754
2755static void
2756aarch64_emit_lt_goto (int *offset_p, int *size_p)
2757{
2758 uint32_t buf[16];
2759 uint32_t *p = buf;
2760
2761 p += emit_pop (p, x1);
2762 p += emit_cmp (p, x1, register_operand (x0));
2763 /* Branch over the next instruction if x1 >= x0. */
2764 p += emit_bcond (p, GE, 8);
2765 /* The NOP instruction will be patched with an unconditional branch. */
2766 if (offset_p)
2767 *offset_p = (p - buf) * 4;
2768 if (size_p)
2769 *size_p = 4;
2770 p += emit_nop (p);
2771
2772 emit_ops_insns (buf, p - buf);
2773}
2774
2775/* Implementation of emit_ops method "emit_le_goto". */
2776
2777static void
2778aarch64_emit_le_goto (int *offset_p, int *size_p)
2779{
2780 uint32_t buf[16];
2781 uint32_t *p = buf;
2782
2783 p += emit_pop (p, x1);
2784 p += emit_cmp (p, x1, register_operand (x0));
2785 /* Branch over the next instruction if x1 > x0. */
2786 p += emit_bcond (p, GT, 8);
2787 /* The NOP instruction will be patched with an unconditional branch. */
2788 if (offset_p)
2789 *offset_p = (p - buf) * 4;
2790 if (size_p)
2791 *size_p = 4;
2792 p += emit_nop (p);
2793
2794 emit_ops_insns (buf, p - buf);
2795}
2796
2797/* Implementation of emit_ops method "emit_gt_goto". */
2798
2799static void
2800aarch64_emit_gt_goto (int *offset_p, int *size_p)
2801{
2802 uint32_t buf[16];
2803 uint32_t *p = buf;
2804
2805 p += emit_pop (p, x1);
2806 p += emit_cmp (p, x1, register_operand (x0));
2807 /* Branch over the next instruction if x1 <= x0. */
2808 p += emit_bcond (p, LE, 8);
2809 /* The NOP instruction will be patched with an unconditional branch. */
2810 if (offset_p)
2811 *offset_p = (p - buf) * 4;
2812 if (size_p)
2813 *size_p = 4;
2814 p += emit_nop (p);
2815
2816 emit_ops_insns (buf, p - buf);
2817}
2818
2819 /* Implementation of emit_ops method "emit_ge_goto". */
2820
2821static void
2822 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2823{
2824 uint32_t buf[16];
2825 uint32_t *p = buf;
2826
2827 p += emit_pop (p, x1);
2828 p += emit_cmp (p, x1, register_operand (x0));
2829 /* Branch over the next instruction if x1 < x0. */
2830 p += emit_bcond (p, LT, 8);
2831 /* The NOP instruction will be patched with an unconditional branch. */
2832 if (offset_p)
2833 *offset_p = (p - buf) * 4;
2834 if (size_p)
2835 *size_p = 4;
2836 p += emit_nop (p);
2837
2838 emit_ops_insns (buf, p - buf);
2839}
2840
2841static struct emit_ops aarch64_emit_ops_impl =
2842{
2843 aarch64_emit_prologue,
2844 aarch64_emit_epilogue,
2845 aarch64_emit_add,
2846 aarch64_emit_sub,
2847 aarch64_emit_mul,
2848 aarch64_emit_lsh,
2849 aarch64_emit_rsh_signed,
2850 aarch64_emit_rsh_unsigned,
2851 aarch64_emit_ext,
2852 aarch64_emit_log_not,
2853 aarch64_emit_bit_and,
2854 aarch64_emit_bit_or,
2855 aarch64_emit_bit_xor,
2856 aarch64_emit_bit_not,
2857 aarch64_emit_equal,
2858 aarch64_emit_less_signed,
2859 aarch64_emit_less_unsigned,
2860 aarch64_emit_ref,
2861 aarch64_emit_if_goto,
2862 aarch64_emit_goto,
2863 aarch64_write_goto_address,
2864 aarch64_emit_const,
2865 aarch64_emit_call,
2866 aarch64_emit_reg,
2867 aarch64_emit_pop,
2868 aarch64_emit_stack_flush,
2869 aarch64_emit_zero_ext,
2870 aarch64_emit_swap,
2871 aarch64_emit_stack_adjust,
2872 aarch64_emit_int_call_1,
2873 aarch64_emit_void_call_2,
2874 aarch64_emit_eq_goto,
2875 aarch64_emit_ne_goto,
2876 aarch64_emit_lt_goto,
2877 aarch64_emit_le_goto,
2878 aarch64_emit_gt_goto,
2879 aarch64_emit_ge_goto,
2880};
2881
2882/* Implementation of linux_target_ops method "emit_ops". */
2883
2884static struct emit_ops *
2885aarch64_emit_ops (void)
2886{
2887 return &aarch64_emit_ops_impl;
2888}
2889
2890/* Implementation of linux_target_ops method
2891 "get_min_fast_tracepoint_insn_len". */
2892
2893static int
2894aarch64_get_min_fast_tracepoint_insn_len (void)
2895{
2896 return 4;
2897}
2898
2899/* Implementation of linux_target_ops method "supports_range_stepping". */
2900
2901static int
2902aarch64_supports_range_stepping (void)
2903{
2904 return 1;
2905}
2906
2907/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2908
2909static const gdb_byte *
2910aarch64_sw_breakpoint_from_kind (int kind, int *size)
2911{
2912 if (is_64bit_tdesc ())
2913 {
2914 *size = aarch64_breakpoint_len;
2915 return aarch64_breakpoint;
2916 }
2917 else
2918 return arm_sw_breakpoint_from_kind (kind, size);
2919}
2920
2921/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2922
2923static int
2924aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2925{
2926 if (is_64bit_tdesc ())
2927 return aarch64_breakpoint_len;
2928 else
2929 return arm_breakpoint_kind_from_pc (pcptr);
2930}
2931
2932/* Implementation of the linux_target_ops method
2933 "breakpoint_kind_from_current_state". */
2934
2935static int
2936aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2937{
2938 if (is_64bit_tdesc ())
2939 return aarch64_breakpoint_len;
2940 else
2941 return arm_breakpoint_kind_from_current_state (pcptr);
2942}
2943
2944/* Implementation of linux_target_ops method "supports_hardware_single_step". */
2945
2946static int
2947aarch64_supports_hardware_single_step (void)
2948{
2949 return 1;
2950}
2951
2952struct linux_target_ops the_low_target =
2953{
2954 aarch64_arch_setup,
3aee8918 2955 aarch64_regs_info,
2956 aarch64_cannot_fetch_register,
2957 aarch64_cannot_store_register,
421530db 2958 NULL, /* fetch_register */
2959 aarch64_get_pc,
2960 aarch64_set_pc,
17b1509a 2961 aarch64_breakpoint_kind_from_pc,
dd373349 2962 aarch64_sw_breakpoint_from_kind,
fa5308bd 2963 NULL, /* get_next_pcs */
421530db 2964 0, /* decr_pc_after_break */
176eb98c 2965 aarch64_breakpoint_at,
802e8e6d 2966 aarch64_supports_z_point_type,
2967 aarch64_insert_point,
2968 aarch64_remove_point,
2969 aarch64_stopped_by_watchpoint,
2970 aarch64_stopped_data_address,
2971 NULL, /* collect_ptrace_register */
2972 NULL, /* supply_ptrace_register */
ade90bde 2973 aarch64_linux_siginfo_fixup,
2974 aarch64_linux_new_process,
2975 aarch64_linux_new_thread,
3a8a0396 2976 aarch64_linux_new_fork,
176eb98c 2977 aarch64_linux_prepare_to_resume,
421530db 2978 NULL, /* process_qsupported */
7671bf47 2979 aarch64_supports_tracepoints,
2980 aarch64_get_thread_area,
2981 aarch64_install_fast_tracepoint_jump_pad,
afbe19f8 2982 aarch64_emit_ops,
bb903df0 2983 aarch64_get_min_fast_tracepoint_insn_len,
d1d0aea1 2984 aarch64_supports_range_stepping,
17b1509a 2985 aarch64_breakpoint_kind_from_current_state,
7d00775e 2986 aarch64_supports_hardware_single_step,
176eb98c 2987};
2988
2989void
2990initialize_low_arch (void)
2991{
2992 init_registers_aarch64 ();
2993
2994 initialize_low_arch_aarch32 ();
2995
2996 initialize_regsets_info (&aarch64_regsets_info);
2997}