/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for this to be per-process rather than per-thread is
     the lack of information in the gdbserver environment; gdbserver is
     not told whether a requested hardware breakpoint/watchpoint is
     thread specific or not, so it has to set each hw bp/wp for every
     thread in the current process.  The higher level bp/wp management
     in gdb will resume a thread if a hw bp/wp trap is not expected for
     it.  Since the hw bp/wp setting is the same for each thread, it is
     reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* The signal must be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch
          && addr_trap < addr_watch + len)
        return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d let the child inherit the
     hardware debug registers from the parent on fork/vfork/clone.
     Newer Linux kernels create such tasks with zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw breakpoints
     of the parent, and will remove them all from the forked-off
     process.  Copy the debug register mirrors into the new process so
     that all breakpoints and watchpoints can be removed together.  The
     debug register mirrors will become zeroed in the end before
     detaching the forked-off process, thus making this compatible with
     older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be read and
   written with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
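
/* A minimal self-check sketch, not part of the upstream file: the
   ENCODE-based emitters produce standard A64 encodings, so BLR x16
   (ip0) should encode to 0xd63f0200.  The function name is
   hypothetical, and ATTRIBUTE_UNUSED (from ansidecl.h, as used
   elsewhere in GDB) is assumed to be available.  */

static ATTRIBUTE_UNUSED void
example_check_blr_encoding (void)
{
  uint32_t insn;

  emit_blr (&insn, ip0);
  gdb_assert (insn == 0xd63f0200);
}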

/* Helper function emitting a load or store register pair instruction.  */

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited
   to the 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited
   to the 0 .. 4095 range (12 bits << 0).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited
   to the 0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
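
/* A minimal usage sketch, not part of the upstream file, showing the
   three addressing forms accepted by the load/store emitters above.
   The function name is hypothetical and for illustration only.  */

static ATTRIBUTE_UNUSED void
example_str_addressing_modes (void)
{
  uint32_t buf[3];
  uint32_t *p = buf;

  p += emit_str (p, x0, sp, offset_memory_operand (16));     /* STR x0, [sp, #16] */
  p += emit_str (p, x1, sp, preindex_memory_operand (-16));  /* STR x1, [sp, #-16]! */
  p += emit_str (p, x2, sp, postindex_memory_operand (16));  /* STR x2, [sp], #16 */
}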

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
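
/* A minimal sketch, not part of the upstream file: emit_mov_addr trims
   the emitted sequence, so a 48-bit address needs only a MOV plus two
   MOVKs.  The function name is hypothetical.  */

static ATTRIBUTE_UNUSED void
example_emit_mov_addr (void)
{
  uint32_t buf[4];
  int len;

  /* MOV x0, #0x9abc; MOVK x0, #0x5678, lsl #16; MOVK x0, #0x1234, lsl #32 */
  len = emit_mov_addr (buf, x0, 0x123456789abcULL);
  gdb_assert (len == 3);
}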

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

   SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

   UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes rn to rd if the condition is true, and
   rm + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

   CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
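
/* A minimal sketch, not part of the upstream file: the usual
   CMP + CSET idiom to materialize a comparison result in a register.
   The function name is hypothetical.  */

static ATTRIBUTE_UNUSED void
example_emit_cmp_cset (void)
{
  uint32_t buf[2];
  uint32_t *p = buf;

  p += emit_cmp (p, x1, register_operand (x2));  /* CMP x1, x2 */
  p += emit_cset (p, x0, LO);                    /* CSET x0, lo (unsigned <) */
}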

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note that instructions are always little endian on AArch64, unlike
   data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
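
/* A minimal usage sketch, not part of the upstream file: build a short
   sequence in a local buffer, then copy it into the inferior at WHERE.
   The function name is hypothetical, and it assumes a running inferior
   that write_inferior_memory can reach.  */

static ATTRIBUTE_UNUSED void
example_append_insns (CORE_ADDR where)
{
  uint32_t buf[2];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (1));  /* MOV x0, #1 */
  p += emit_ret (p, lr);                         /* RET x30 */

  /* Writes 8 bytes of little-endian instructions; WHERE advances by 8.  */
  append_insns (&where, p - buf, buf);
}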

/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

   */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
bb903df0
PL
1783/* Implementation of linux_target_ops method
1784 "install_fast_tracepoint_jump_pad". */
1785
1786static int
1787aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1788 CORE_ADDR tpaddr,
1789 CORE_ADDR collector,
1790 CORE_ADDR lockaddr,
1791 ULONGEST orig_size,
1792 CORE_ADDR *jump_entry,
1793 CORE_ADDR *trampoline,
1794 ULONGEST *trampoline_size,
1795 unsigned char *jjump_pad_insn,
1796 ULONGEST *jjump_pad_insn_size,
1797 CORE_ADDR *adjusted_insn_addr,
1798 CORE_ADDR *adjusted_insn_addr_end,
1799 char *err)
1800{
1801 uint32_t buf[256];
1802 uint32_t *p = buf;
1803 int32_t offset;
1804 int i;
70b439f0 1805 uint32_t insn;
bb903df0 1806 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1807 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1808
  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                 | struct tracepoint *      |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

   */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

   */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
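  /* (32 + 31 + 5) * 16 = 1088 bytes is everything pushed so far, so x4
     recovers the value SP had when the tracepoint was hit.  */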

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

   */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

   */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

   */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core, so that the first WFE
       ; below does not block.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

   */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
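  /* The emit_cb offsets are byte offsets relative to the CBNZ being
     emitted: -2 * 4 and -4 * 4 both land on the WFE above, i.e. the
     "again" label of the pseudo-assembly.  */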

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

   */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

   */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

   */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

   */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

   */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
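  /* If the visitor could not handle the instruction, nothing was
     emitted, so INSN_PTR still pointing at the start of BUF signals
     failure.  */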
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */
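/* Stack cells are 16 bytes wide here (matching the jump pad layout
   above), which keeps SP 16-byte aligned at all times.  */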

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

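  /* In assembly, the emitted prologue is (offsets follow from the
     2 * 16 byte pre-index and the layout above):

       STP x0, x1, [sp, #-32]!
       STR lr, [sp, #24]
       STR fp, [sp, #16]
       ADD fp, sp, #16  */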
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
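
/* The binary operations below all follow this pattern: x0 caches the
   top of the stack, the next stack element is popped into x1, and the
   result is left in x0.  */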

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
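
/* The bytecode compiler later resolves the goto: *OFFSET_P tells it
   where the placeholder NOP lives inside this sequence, and
   write_goto_address (below) overwrites that NOP with a real branch
   once the target address is known.  */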

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */
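/* FROM and TO are inferior addresses; the patch is always a single
   4-byte B instruction, so SIZE is not consulted here.  */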

void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
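
/* The remaining conditional gotos below all follow the same shape as
   emit_eq_goto, differing only in the condition code used to skip the
   patched branch.  */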

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
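
/* These breakpoint-kind hooks dispatch on the register size because a
   64-bit gdbserver can also control 32-bit (AArch32) inferiors, which
   use the ARM breakpoint encodings from linux-aarch32-low.c.  */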

/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  aarch64_cannot_fetch_register,
  aarch64_cannot_store_register,
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_new_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

void
initialize_low_arch (void)
{
  init_registers_aarch64 ();

  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}