/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset = buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
        debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
        debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
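
/* For reference: read as a little-endian 32-bit word, these bytes form
   the instruction 0xd4200000, i.e. BRK #0.  */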

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  gdb_byte insn[aarch64_breakpoint_len];

  (*the_target->read_memory) (where, (unsigned char *) &insn,
                              aarch64_breakpoint_len);
  if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
    return 1;

  return 0;
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
      {
        if (!extended_protocol && is_64bit_tdesc ())
          {
            /* Only enable Z0 packet in non-multi-arch debugging.  If
               extended protocol is used, don't enable Z0 packet because
               GDBserver may attach to 32-bit process.  */
            return 1;
          }
        else
          {
            /* Disable Z0 packet so that GDBserver doesn't have to handle
               different breakpoint instructions (aarch64, arm, thumb etc)
               in multi-arch debugging.  */
            return 0;
          }
      }
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    ret =
      aarch64_handle_breakpoint (targ_type, addr, len, 1 /* is_insert */,
                                 state);

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    ret =
      aarch64_handle_breakpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch
          && addr_trap < addr_watch + len)
        return addr_trap;
    }

  return (CORE_ADDR) 0;
}
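
/* For example, with an 8-byte watchpoint installed at 0x1000, a trap
   reported with si_addr 0x1004 falls inside the watched range
   [0x1000, 0x1008) and 0x1004 is returned, while a trap at 0x1008
   would not match any watchpoint.  */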

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum
    {
      OPERAND_IMMEDIATE,
      OPERAND_REGISTER,
    } type;
  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
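
/* For example, under this packing TPIDR_EL0 corresponds to the system
   register S3_3_C13_C0_2 (op0 = 3, op1 = 3, CRn = 13, CRm = 0, op2 = 2);
   only the low bit of op0 needs storing here, since op0 is always 2 or 3
   for registers accessible to MRS/MSR.  */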

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
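
/* For example, ENCODE (rn.num, 5, 5) above places the 5-bit register
   number at bits 5..9 of the instruction word, so emit_blr (p, x3)
   produces 0xd63f0060: the BLR opcode 0xd63f0000 with 3 << 5 merged
   in.  */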

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
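
/* Note on the immediate above: the pair offset is scaled by 8, so
   ENCODE (operand.index >> 3, 7, 15) stores it as a 7-bit field at
   bit 15.  For example, assuming ENCODE masks its value to the given
   width, a pre-index operand of -16 is encoded as (-16 >> 3) & 0x7f,
   i.e. 0x7e.  */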

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits << 0).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}



/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, enum aarch64_opcodes opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}
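
/* For illustration: through the immediate path above, the ADD emitter
   defined just below used as emit_add (p, x0, sp, immediate_operand (16))
   assembles to the word 0x910043e0, which disassembles as
   ADD x0, sp, #16.  */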

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
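
/* For example, assuming ADDR = 0x0000ffffb7000000, the emitted
   sequence is:

     MOV  xd, #0x0
     MOVK xd, #0xb700, lsl #16
     MOVK xd, #0xffff, lsl #32

   The fourth instruction is skipped because bits 48..63 of ADDR are
   zero, and emit_mov_addr returns 3.  */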

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write a AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write a ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write a ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write a EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write a ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes RN to RD if the condition is true, and
   RM + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO,
   advancing *TO past the written instructions.

   Note that instructions are always little endian on AArch64, unlike
   data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
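
/* A minimal usage sketch of the emit_* helpers above, assuming TO
   already holds a writable inferior address:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&to, p - buf, buf);

   After the call, TO points just past the two copied instructions.  */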

/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
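
/* The offset adjustment above keeps the branch target unchanged.  For
   example, assuming an instruction at 0x400000 branching with offset
   0x1000 (target 0x401000) is relocated to 0x500000: new_offset
   becomes 0x400000 - 0x500000 + 0x1000 = -0xff000, and
   0x500000 + (-0xff000) is again 0x401000.  */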
1577
1578/* Implementation of aarch64_insn_visitor method "b_cond". */
1579
1580static void
1581aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1582 struct aarch64_insn_data *data)
1583{
1584 struct aarch64_insn_relocation_data *insn_reloc
1585 = (struct aarch64_insn_relocation_data *) data;
1586 int32_t new_offset
1587 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1588
1589 if (can_encode_int32 (new_offset, 21))
1590 {
1591 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1592 new_offset);
bb903df0 1593 }
0badd99f 1594 else if (can_encode_int32 (new_offset, 28))
bb903df0 1595 {
0badd99f
YQ
1596 /* The offset is out of range for a conditional branch
1597 instruction but not for a unconditional branch. We can use
1598 the following instructions instead:
bb903df0 1599
0badd99f
YQ
1600 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1601 B NOT_TAKEN ; Else jump over TAKEN and continue.
1602 TAKEN:
1603 B #(offset - 8)
1604 NOT_TAKEN:
1605
1606 */
1607
1608 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1609 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1610 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1611 }
0badd99f 1612}
bb903df0 1613
0badd99f
YQ
1614/* Implementation of aarch64_insn_visitor method "cb". */
1615
1616static void
1617aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1618 const unsigned rn, int is64,
1619 struct aarch64_insn_data *data)
1620{
1621 struct aarch64_insn_relocation_data *insn_reloc
1622 = (struct aarch64_insn_relocation_data *) data;
1623 int32_t new_offset
1624 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1625
1626 if (can_encode_int32 (new_offset, 21))
1627 {
1628 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1629 aarch64_register (rn, is64), new_offset);
bb903df0 1630 }
0badd99f 1631 else if (can_encode_int32 (new_offset, 28))
bb903df0 1632 {
0badd99f
YQ
1633 /* The offset is out of range for a compare and branch
1634 instruction but not for a unconditional branch. We can use
1635 the following instructions instead:
1636
1637 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1638 B NOT_TAKEN ; Else jump over TAKEN and continue.
1639 TAKEN:
1640 B #(offset - 8)
1641 NOT_TAKEN:
1642
1643 */
1644 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1645 aarch64_register (rn, is64), 8);
1646 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1647 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1648 }
1649}
bb903df0 1650
0badd99f 1651/* Implementation of aarch64_insn_visitor method "tb". */
bb903df0 1652
0badd99f
YQ
1653static void
1654aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1655 const unsigned rt, unsigned bit,
1656 struct aarch64_insn_data *data)
1657{
1658 struct aarch64_insn_relocation_data *insn_reloc
1659 = (struct aarch64_insn_relocation_data *) data;
1660 int32_t new_offset
1661 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1662
1663 if (can_encode_int32 (new_offset, 16))
1664 {
1665 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1666 aarch64_register (rt, 1), new_offset);
bb903df0 1667 }
0badd99f 1668 else if (can_encode_int32 (new_offset, 28))
bb903df0 1669 {
0badd99f
YQ
1670 /* The offset is out of range for a test bit and branch
1671 instruction but not for a unconditional branch. We can use
1672 the following instructions instead:
1673
1674 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1675 B NOT_TAKEN ; Else jump over TAKEN and continue.
1676 TAKEN:
1677 B #(offset - 8)
1678 NOT_TAKEN:
1679
1680 */
1681 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1682 aarch64_register (rt, 1), 8);
1683 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1684 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1685 new_offset - 8);
1686 }
1687}
bb903df0 1688
0badd99f 1689/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1690
0badd99f
YQ
1691static void
1692aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1693 const int is_adrp,
1694 struct aarch64_insn_data *data)
1695{
1696 struct aarch64_insn_relocation_data *insn_reloc
1697 = (struct aarch64_insn_relocation_data *) data;
1698 /* We know exactly the address the ADR{P,} instruction will compute.
1699 We can just write it to the destination register. */
1700 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1701
0badd99f
YQ
1702 if (is_adrp)
1703 {
1704 /* Clear the lower 12 bits of the offset to get the 4K page. */
1705 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1706 aarch64_register (rd, 1),
1707 address & ~0xfff);
1708 }
1709 else
1710 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1711 aarch64_register (rd, 1), address);
1712}
bb903df0 1713
0badd99f 1714/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1715
0badd99f
YQ
1716static void
1717aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1718 const unsigned rt, const int is64,
1719 struct aarch64_insn_data *data)
1720{
1721 struct aarch64_insn_relocation_data *insn_reloc
1722 = (struct aarch64_insn_relocation_data *) data;
1723 CORE_ADDR address = data->insn_addr + offset;
1724
1725 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1726 aarch64_register (rt, 1), address);
1727
1728 /* We know exactly what address to load from, and what register we
1729 can use:
1730
1731 MOV xd, #(oldloc + offset)
1732 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1733 ...
1734
1735 LDR xd, [xd] ; or LDRSW xd, [xd]
1736
1737 */
1738
1739 if (is_sw)
1740 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1741 aarch64_register (rt, 1),
1742 aarch64_register (rt, 1),
1743 offset_memory_operand (0));
bb903df0 1744 else
0badd99f
YQ
1745 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1746 aarch64_register (rt, is64),
1747 aarch64_register (rt, 1),
1748 offset_memory_operand (0));
1749}
1750
1751/* Implementation of aarch64_insn_visitor method "others". */
1752
1753static void
1754aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1755 struct aarch64_insn_data *data)
1756{
1757 struct aarch64_insn_relocation_data *insn_reloc
1758 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1759
0badd99f
YQ
1760 /* The instruction is not PC relative. Just re-emit it at the new
1761 location. */
e1c587c3 1762 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
0badd99f
YQ
1763}
1764
1765static const struct aarch64_insn_visitor visitor =
1766{
1767 aarch64_ftrace_insn_reloc_b,
1768 aarch64_ftrace_insn_reloc_b_cond,
1769 aarch64_ftrace_insn_reloc_cb,
1770 aarch64_ftrace_insn_reloc_tb,
1771 aarch64_ftrace_insn_reloc_adr,
1772 aarch64_ftrace_insn_reloc_ldr_literal,
1773 aarch64_ftrace_insn_reloc_others,
1774};
1775
bb903df0
PL
1776/* Implementation of linux_target_ops method
1777 "install_fast_tracepoint_jump_pad". */
1778
1779static int
1780aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1781 CORE_ADDR tpaddr,
1782 CORE_ADDR collector,
1783 CORE_ADDR lockaddr,
1784 ULONGEST orig_size,
1785 CORE_ADDR *jump_entry,
1786 CORE_ADDR *trampoline,
1787 ULONGEST *trampoline_size,
1788 unsigned char *jjump_pad_insn,
1789 ULONGEST *jjump_pad_insn_size,
1790 CORE_ADDR *adjusted_insn_addr,
1791 CORE_ADDR *adjusted_insn_addr_end,
1792 char *err)
1793{
1794 uint32_t buf[256];
1795 uint32_t *p = buf;
1796 int32_t offset;
1797 int i;
70b439f0 1798 uint32_t insn;
bb903df0 1799 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1800 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1801
1802 /* We need to save the current state on the stack both to restore it
1803 later and to collect register values when the tracepoint is hit.
1804
1805 The saved registers are pushed in a layout that needs to be in sync
1806 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1807 the supply_fast_tracepoint_registers function will fill in the
1808 register cache from a pointer to saved registers on the stack we build
1809 here.
1810
1811 For simplicity, we set the size of each cell on the stack to 16 bytes.
1812 This way one cell can hold any register type, from system registers
1813 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1814 has to be 16 bytes aligned anyway.
1815
1816 Note that the CPSR register does not exist on AArch64. Instead we
1817 can access system bits describing the process state with the
1818 MRS/MSR instructions, namely the condition flags. We save them as
1819 if they are part of a CPSR register because that's how GDB
1820 interprets these system bits. At the moment, only the condition
1821 flags are saved in CPSR (NZCV).
1822
1823 Stack layout, each cell is 16 bytes (descending):
1824
1825 High *-------- SIMD&FP registers from 31 down to 0. --------*
1826 | q31 |
1827 . .
1828 . . 32 cells
1829 . .
1830 | q0 |
1831 *---- General purpose registers from 30 down to 0. ----*
1832 | x30 |
1833 . .
1834 . . 31 cells
1835 . .
1836 | x0 |
1837 *------------- Special purpose registers. -------------*
1838 | SP |
1839 | PC |
1840 | CPSR (NZCV) | 5 cells
1841 | FPSR |
1842 | FPCR | <- SP + 16
1843 *------------- collecting_t object --------------------*
1844 | TPIDR_EL0 | struct tracepoint * |
1845 Low *------------------------------------------------------*
1846
1847 After this stack is set up, we issue a call to the collector, passing
1848 it the saved registers at (SP + 16). */
1849
1850 /* Push SIMD&FP registers on the stack:
1851
1852 SUB sp, sp, #(32 * 16)
1853
1854 STP q30, q31, [sp, #(30 * 16)]
1855 ...
1856 STP q0, q1, [sp]
1857
1858 */
1859 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1860 for (i = 30; i >= 0; i -= 2)
1861 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1862
1863 /* Push general purpose registers on the stack. Note that we do not need
1864 to push x31 as it represents the xzr register and not the stack
1865 pointer in a STR instruction.
1866
1867 SUB sp, sp, #(31 * 16)
1868
1869 STR x30, [sp, #(30 * 16)]
1870 ...
1871 STR x0, [sp]
1872
1873 */
1874 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1875 for (i = 30; i >= 0; i -= 1)
1876 p += emit_str (p, aarch64_register (i, 1), sp,
1877 offset_memory_operand (i * 16));
1878
1879 /* Make space for 5 more cells.
1880
1881 SUB sp, sp, #(5 * 16)
1882
1883 */
1884 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1885
1886
1887 /* Save SP:
1888
1889 ADD x4, sp, #((32 + 31 + 5) * 16)
1890 STR x4, [sp, #(4 * 16)]
1891
1892 */
1893 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1894 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1895
1896 /* Save PC (tracepoint address):
1897
1898 MOV x3, #(tpaddr)
1899 ...
1900
1901 STR x3, [sp, #(3 * 16)]
1902
1903 */
1904
1905 p += emit_mov_addr (p, x3, tpaddr);
1906 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1907
1908 /* Save CPSR (NZCV), FPSR and FPCR:
1909
1910 MRS x2, nzcv
1911 MRS x1, fpsr
1912 MRS x0, fpcr
1913
1914 STR x2, [sp, #(2 * 16)]
1915 STR x1, [sp, #(1 * 16)]
1916 STR x0, [sp, #(0 * 16)]
1917
1918 */
1919 p += emit_mrs (p, x2, NZCV);
1920 p += emit_mrs (p, x1, FPSR);
1921 p += emit_mrs (p, x0, FPCR);
1922 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1923 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1924 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1925
1926 /* Push the collecting_t object. It consists of the address of the
1927 tracepoint and an ID for the current thread. We get the latter by
1928 reading the tpidr_el0 system register. It corresponds to the
1929 NT_ARM_TLS register accessible with ptrace.
1930
1931 MOV x0, #(tpoint)
1932 ...
1933
1934 MRS x1, tpidr_el0
1935
1936 STP x0, x1, [sp, #-16]!
1937
1938 */
1939
1940 p += emit_mov_addr (p, x0, tpoint);
1941 p += emit_mrs (p, x1, TPIDR_EL0);
1942 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1943
1944 /* Spin-lock:
1945
1946 The shared memory for the lock is at lockaddr. It will hold zero
1947 if no-one is holding the lock, otherwise it contains the address of
1948 the collecting_t object on the stack of the thread which acquired it.
1949
1950 At this stage, the stack pointer points to this thread's collecting_t
1951 object.
1952
1953 We use the following registers:
1954 - x0: Address of the lock.
1955 - x1: Pointer to collecting_t object.
1956 - x2: Scratch register.
1957
1958 MOV x0, #(lockaddr)
1959 ...
1960 MOV x1, sp
1961
1962 ; Trigger an event local to this core, so that the following WFE
1963 ; instruction completes immediately instead of waiting.
1964 SEVL
1965 again:
1966 ; Wait for an event. The event is triggered by either the SEVL
1967 ; or STLR instructions (store release).
1968 WFE
1969
1970 ; Atomically read at lockaddr. This marks the memory location as
1971 ; exclusive. This instruction also has memory constraints which
1972 ; make sure all previous data reads and writes are done before
1973 ; executing it.
1974 LDAXR x2, [x0]
1975
1976 ; Try again if another thread holds the lock.
1977 CBNZ x2, again
1978
1979 ; We can lock it! Write the address of the collecting_t object.
1980 ; This instruction will fail if the memory location is not marked
1981 ; as exclusive anymore. If it succeeds, it will remove the
1982 ; exclusive mark on the memory location. This way, if another
1983 ; thread executes this instruction before us, we will fail and try
1984 ; all over again.
1985 STXR w2, x1, [x0]
1986 CBNZ w2, again
1987
1988 */
1989
1990 p += emit_mov_addr (p, x0, lockaddr);
1991 p += emit_mov (p, x1, register_operand (sp));
1992
1993 p += emit_sevl (p);
1994 p += emit_wfe (p);
1995 p += emit_ldaxr (p, x2, x0);
1996 p += emit_cb (p, 1, x2, -2 * 4);
1997 p += emit_stxr (p, w2, x1, x0);
1998 p += emit_cb (p, 1, w2, -4 * 4);
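
  /* For reference (this is not emitted): the sequence above behaves
     like the following C11 loop, assuming LOCK is an atomic uintptr_t
     at lockaddr that holds zero when the lock is free:

       uintptr_t expected = 0;

       while (!atomic_compare_exchange_weak_explicit
	        (&LOCK, &expected, (uintptr_t) &collecting,
	         memory_order_acquire, memory_order_relaxed))
	 expected = 0;  /* WFE parks the core until the holder's STLR.  */

     Using LDAXR/STXR additionally avoids issuing any store while the
     lock is held by another thread.  */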
1999
2000 /* Call collector (struct tracepoint *, unsigned char *):
2001
2002 MOV x0, #(tpoint)
2003 ...
2004
2005 ; Saved registers start after the collecting_t object.
2006 ADD x1, sp, #16
2007
2008 ; We use an intra-procedure-call scratch register.
2009 MOV ip0, #(collector)
2010 ...
2011
2012 ; And call back to C!
2013 BLR ip0
2014
2015 */
2016
2017 p += emit_mov_addr (p, x0, tpoint);
2018 p += emit_add (p, x1, sp, immediate_operand (16));
2019
2020 p += emit_mov_addr (p, ip0, collector);
2021 p += emit_blr (p, ip0);
2022
2023 /* Release the lock.
2024
2025 MOV x0, #(lockaddr)
2026 ...
2027
2028 ; This instruction is a normal store with memory ordering
2029 ; constraints. Thanks to this we do not have to put a data
2030 ; barrier instruction to make sure all data reads and writes are done
2031 ; before this instruction is executed. Furthermore, this instruction
2032 ; will trigger an event, letting other threads know they can grab
2033 ; the lock.
2034 STLR xzr, [x0]
2035
2036 */
2037 p += emit_mov_addr (p, x0, lockaddr);
2038 p += emit_stlr (p, xzr, x0);
2039
2040 /* Free collecting_t object:
2041
2042 ADD sp, sp, #16
2043
2044 */
2045 p += emit_add (p, sp, sp, immediate_operand (16));
2046
2047 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2048 registers from the stack.
2049
2050 LDR x2, [sp, #(2 * 16)]
2051 LDR x1, [sp, #(1 * 16)]
2052 LDR x0, [sp, #(0 * 16)]
2053
2054 MSR NZCV, x2
2055 MSR FPSR, x1
2056 MSR FPCR, x0
2057
2058 ADD sp, sp, #(5 * 16)
2059
2060 */
2061 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2062 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2063 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2064 p += emit_msr (p, NZCV, x2);
2065 p += emit_msr (p, FPSR, x1);
2066 p += emit_msr (p, FPCR, x0);
2067
2068 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2069
2070 /* Pop general purpose registers:
2071
2072 LDR x0, [sp]
2073 ...
2074 LDR x30, [sp, #(30 * 16)]
2075
2076 ADD sp, sp, #(31 * 16)
2077
2078 */
2079 for (i = 0; i <= 30; i += 1)
2080 p += emit_ldr (p, aarch64_register (i, 1), sp,
2081 offset_memory_operand (i * 16));
2082 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2083
2084 /* Pop SIMD&FP registers:
2085
2086 LDP q0, q1, [sp]
2087 ...
2088 LDP q30, q31, [sp, #(30 * 16)]
2089
2090 ADD sp, sp, #(32 * 16)
2091
2092 */
2093 for (i = 0; i <= 30; i += 2)
2094 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2095 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2096
2097 /* Write the code into the inferior memory. */
2098 append_insns (&buildaddr, p - buf, buf);
2099
2100 /* Now emit the relocated instruction. */
2101 *adjusted_insn_addr = buildaddr;
2102 target_read_uint32 (tpaddr, &insn);
2103
2104 insn_data.base.insn_addr = tpaddr;
2105 insn_data.new_addr = buildaddr;
2106 insn_data.insn_ptr = buf;
2107
2108 aarch64_relocate_instruction (insn, &visitor,
2109 (struct aarch64_insn_data *) &insn_data);
2110
2111 /* We may not have been able to relocate the instruction. */
2112 if (insn_data.insn_ptr == buf)
2113 {
2114 sprintf (err,
2115 "E.Could not relocate instruction from %s to %s.",
2116 core_addr_to_string_nz (tpaddr),
2117 core_addr_to_string_nz (buildaddr));
2118 return 1;
2119 }
2120 else
2121 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2122 *adjusted_insn_addr_end = buildaddr;
2123
2124 /* Go back to the start of the buffer. */
2125 p = buf;
2126
2127 /* Emit a branch back from the jump pad. */
2128 offset = (tpaddr + orig_size - buildaddr);
2129 if (!can_encode_int32 (offset, 28))
2130 {
2131 sprintf (err,
2132 "E.Jump back from jump pad too far from tracepoint "
2133 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2134 offset);
2135 return 1;
2136 }
2137
2138 p += emit_b (p, 0, offset);
2139 append_insns (&buildaddr, p - buf, buf);
2140
2141 /* Give the caller a branch instruction into the jump pad. */
2142 offset = (*jump_entry - tpaddr);
2143 if (!can_encode_int32 (offset, 28))
2144 {
2145 sprintf (err,
2146 "E.Jump pad too far from tracepoint "
2147 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2148 offset);
2149 return 1;
2150 }
2151
2152 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2153 *jjump_pad_insn_size = 4;
2154
2155 /* Return the end address of our pad. */
2156 *jump_entry = buildaddr;
2157
2158 return 0;
2159}
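
/* A minimal sketch of the range check performed above, assuming B/BL
   take a 26-bit signed immediate scaled by 4, hence a 28-bit signed
   byte offset. fits_signed_bits is a hypothetical stand-in for
   can_encode_int32 and is valid for 0 < BITS < 32.  */

static inline int
fits_signed_bits (int32_t val, unsigned int bits)
{
  int32_t limit = (int32_t) 1 << (bits - 1);

  return val >= -limit && val < limit;
}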
2160
2161/* Helper function writing LEN instructions from START into
2162 current_insn_ptr. */
2163
2164static void
2165emit_ops_insns (const uint32_t *start, int len)
2166{
2167 CORE_ADDR buildaddr = current_insn_ptr;
2168
2169 if (debug_threads)
2170 debug_printf ("Adding %d instructions at %s\n",
2171 len, paddress (buildaddr));
2172
2173 append_insns (&buildaddr, len, start);
2174 current_insn_ptr = buildaddr;
2175}
2176
2177/* Pop a register from the stack. */
2178
2179static int
2180emit_pop (uint32_t *buf, struct aarch64_register rt)
2181{
2182 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2183}
2184
2185/* Push a register on the stack. */
2186
2187static int
2188emit_push (uint32_t *buf, struct aarch64_register rt)
2189{
2190 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2191}
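
/* The compiled expression keeps the top of the stack cached in x0, so
   emit_push/emit_pop only touch the 16-byte cells below it.  Compiling
   "const 2; const 3; add" would for instance produce (a sketch):

     MOV x0, #2
     STR x0, [sp, #-16]!   ; emit_stack_flush before the next const
     MOV x0, #3
     LDR x1, [sp], #16     ; emit_pop inside aarch64_emit_add
     ADD x0, x0, x1
*/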
2192
2193/* Implementation of emit_ops method "emit_prologue". */
2194
2195static void
2196aarch64_emit_prologue (void)
2197{
2198 uint32_t buf[16];
2199 uint32_t *p = buf;
2200
2201 /* This function emits a prologue for the following function prototype:
2202
2203 enum eval_result_type f (unsigned char *regs,
2204 ULONGEST *value);
2205
2206 The first argument is a buffer of raw registers. The second
2207 argument is a pointer through which the result of evaluating the
2208 expression is returned; it is set to whatever is on top of the
2209 stack at the end.
2210
2211 The stack set up by the prologue is as such:
2212
2213 High *------------------------------------------------------*
2214 | LR |
2215 | FP | <- FP
2216 | x1 (ULONGEST *value) |
2217 | x0 (unsigned char *regs) |
2218 Low *------------------------------------------------------*
2219
2220 As we are implementing a stack machine, each opcode can expand the
2221 stack so we never know how far we are from the data saved by this
2222 prologue. In order to be able to refer to value and regs later, we save
2223 the current stack pointer in the frame pointer. This way, it is not
2224 clobbered when calling C functions.
2225
2226 Finally, throughout every operation, we use register x0 as the
2227 top of the stack, and x1 as a scratch register. */
2228
2229 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2230 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2231 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2232
2233 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2234
2235
2236 emit_ops_insns (buf, p - buf);
2237}
2238
2239/* Implementation of emit_ops method "emit_epilogue". */
2240
2241static void
2242aarch64_emit_epilogue (void)
2243{
2244 uint32_t buf[16];
2245 uint32_t *p = buf;
2246
2247 /* Store the result of the expression (x0) in *value. */
2248 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2249 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2250 p += emit_str (p, x0, x1, offset_memory_operand (0));
2251
2252 /* Restore the previous state. */
2253 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2254 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2255
2256 /* Return expr_eval_no_error. */
2257 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2258 p += emit_ret (p, lr);
2259
2260 emit_ops_insns (buf, p - buf);
2261}
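
/* Taken together, prologue and epilogue make each compiled agent
   expression behave like the following C function (a sketch, with the
   emitted opcode bodies in the middle):

     enum eval_result_type
     compiled_expr (unsigned char *regs, ULONGEST *value)
     {
       ...emitted opcodes, top of the stack cached in x0...
       *value = top_of_stack;
       return expr_eval_no_error;
     }
*/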
2262
2263/* Implementation of emit_ops method "emit_add". */
2264
2265static void
2266aarch64_emit_add (void)
2267{
2268 uint32_t buf[16];
2269 uint32_t *p = buf;
2270
2271 p += emit_pop (p, x1);
2272 p += emit_add (p, x0, x0, register_operand (x1));
2273
2274 emit_ops_insns (buf, p - buf);
2275}
2276
2277/* Implementation of emit_ops method "emit_sub". */
2278
2279static void
2280aarch64_emit_sub (void)
2281{
2282 uint32_t buf[16];
2283 uint32_t *p = buf;
2284
2285 p += emit_pop (p, x1);
2286 p += emit_sub (p, x0, x0, register_operand (x1));
2287
2288 emit_ops_insns (buf, p - buf);
2289}
2290
2291/* Implementation of emit_ops method "emit_mul". */
2292
2293static void
2294aarch64_emit_mul (void)
2295{
2296 uint32_t buf[16];
2297 uint32_t *p = buf;
2298
2299 p += emit_pop (p, x1);
2300 p += emit_mul (p, x0, x1, x0);
2301
2302 emit_ops_insns (buf, p - buf);
2303}
2304
2305/* Implementation of emit_ops method "emit_lsh". */
2306
2307static void
2308aarch64_emit_lsh (void)
2309{
2310 uint32_t buf[16];
2311 uint32_t *p = buf;
2312
2313 p += emit_pop (p, x1);
2314 p += emit_lslv (p, x0, x1, x0);
2315
2316 emit_ops_insns (buf, p - buf);
2317}
2318
2319/* Implementation of emit_ops method "emit_rsh_signed". */
2320
2321static void
2322aarch64_emit_rsh_signed (void)
2323{
2324 uint32_t buf[16];
2325 uint32_t *p = buf;
2326
2327 p += emit_pop (p, x1);
2328 p += emit_asrv (p, x0, x1, x0);
2329
2330 emit_ops_insns (buf, p - buf);
2331}
2332
2333/* Implementation of emit_ops method "emit_rsh_unsigned". */
2334
2335static void
2336aarch64_emit_rsh_unsigned (void)
2337{
2338 uint32_t buf[16];
2339 uint32_t *p = buf;
2340
2341 p += emit_pop (p, x1);
2342 p += emit_lsrv (p, x0, x1, x0);
2343
2344 emit_ops_insns (buf, p - buf);
2345}
2346
2347/* Implementation of emit_ops method "emit_ext". */
2348
2349static void
2350aarch64_emit_ext (int arg)
2351{
2352 uint32_t buf[16];
2353 uint32_t *p = buf;
2354
2355 p += emit_sbfx (p, x0, x0, 0, arg);
2356
2357 emit_ops_insns (buf, p - buf);
2358}
2359
2360/* Implementation of emit_ops method "emit_log_not". */
2361
2362static void
2363aarch64_emit_log_not (void)
2364{
2365 uint32_t buf[16];
2366 uint32_t *p = buf;
2367
2368 /* If the top of the stack is 0, replace it with 1. Else replace it with
2369 0. */
2370
2371 p += emit_cmp (p, x0, immediate_operand (0));
2372 p += emit_cset (p, x0, EQ);
2373
2374 emit_ops_insns (buf, p - buf);
2375}
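
/* CSET is an alias of CSINC with the zero register: "CSET x0, EQ"
   writes 1 to x0 when the Z flag is set and 0 otherwise, so the
   CMP/CSET pair computes the boolean without a branch.  */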
2376
2377/* Implementation of emit_ops method "emit_bit_and". */
2378
2379static void
2380aarch64_emit_bit_and (void)
2381{
2382 uint32_t buf[16];
2383 uint32_t *p = buf;
2384
2385 p += emit_pop (p, x1);
2386 p += emit_and (p, x0, x0, x1);
2387
2388 emit_ops_insns (buf, p - buf);
2389}
2390
2391/* Implementation of emit_ops method "emit_bit_or". */
2392
2393static void
2394aarch64_emit_bit_or (void)
2395{
2396 uint32_t buf[16];
2397 uint32_t *p = buf;
2398
2399 p += emit_pop (p, x1);
2400 p += emit_orr (p, x0, x0, x1);
2401
2402 emit_ops_insns (buf, p - buf);
2403}
2404
2405/* Implementation of emit_ops method "emit_bit_xor". */
2406
2407static void
2408aarch64_emit_bit_xor (void)
2409{
2410 uint32_t buf[16];
2411 uint32_t *p = buf;
2412
2413 p += emit_pop (p, x1);
2414 p += emit_eor (p, x0, x0, x1);
2415
2416 emit_ops_insns (buf, p - buf);
2417}
2418
2419/* Implementation of emit_ops method "emit_bit_not". */
2420
2421static void
2422aarch64_emit_bit_not (void)
2423{
2424 uint32_t buf[16];
2425 uint32_t *p = buf;
2426
2427 p += emit_mvn (p, x0, x0);
2428
2429 emit_ops_insns (buf, p - buf);
2430}
2431
2432/* Implementation of emit_ops method "emit_equal". */
2433
2434static void
2435aarch64_emit_equal (void)
2436{
2437 uint32_t buf[16];
2438 uint32_t *p = buf;
2439
2440 p += emit_pop (p, x1);
2441 p += emit_cmp (p, x0, register_operand (x1));
2442 p += emit_cset (p, x0, EQ);
2443
2444 emit_ops_insns (buf, p - buf);
2445}
2446
2447/* Implementation of emit_ops method "emit_less_signed". */
2448
2449static void
2450aarch64_emit_less_signed (void)
2451{
2452 uint32_t buf[16];
2453 uint32_t *p = buf;
2454
2455 p += emit_pop (p, x1);
2456 p += emit_cmp (p, x1, register_operand (x0));
2457 p += emit_cset (p, x0, LT);
2458
2459 emit_ops_insns (buf, p - buf);
2460}
2461
2462/* Implementation of emit_ops method "emit_less_unsigned". */
2463
2464static void
2465aarch64_emit_less_unsigned (void)
2466{
2467 uint32_t buf[16];
2468 uint32_t *p = buf;
2469
2470 p += emit_pop (p, x1);
2471 p += emit_cmp (p, x1, register_operand (x0));
2472 p += emit_cset (p, x0, LO);
2473
2474 emit_ops_insns (buf, p - buf);
2475}
2476
2477/* Implementation of emit_ops method "emit_ref". */
2478
2479static void
2480aarch64_emit_ref (int size)
2481{
2482 uint32_t buf[16];
2483 uint32_t *p = buf;
2484
2485 switch (size)
2486 {
2487 case 1:
2488 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2489 break;
2490 case 2:
2491 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2492 break;
2493 case 4:
2494 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2495 break;
2496 case 8:
2497 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2498 break;
2499 default:
2500 /* Unknown size, bail on compilation. */
2501 emit_error = 1;
2502 break;
2503 }
2504
2505 emit_ops_insns (buf, p - buf);
2506}
2507
2508/* Implementation of emit_ops method "emit_if_goto". */
2509
2510static void
2511aarch64_emit_if_goto (int *offset_p, int *size_p)
2512{
2513 uint32_t buf[16];
2514 uint32_t *p = buf;
2515
2516 /* The Z flag is set or cleared here. */
2517 p += emit_cmp (p, x0, immediate_operand (0));
2518 /* This instruction must not change the Z flag. */
2519 p += emit_pop (p, x0);
2520 /* Branch over the next instruction if x0 == 0. */
2521 p += emit_bcond (p, EQ, 8);
2522
2523 /* The NOP instruction will be patched with an unconditional branch. */
2524 if (offset_p)
2525 *offset_p = (p - buf) * 4;
2526 if (size_p)
2527 *size_p = 4;
2528 p += emit_nop (p);
2529
2530 emit_ops_insns (buf, p - buf);
2531}
2532
2533/* Implementation of emit_ops method "emit_goto". */
2534
2535static void
2536aarch64_emit_goto (int *offset_p, int *size_p)
2537{
2538 uint32_t buf[16];
2539 uint32_t *p = buf;
2540
2541 /* The NOP instruction will be patched with an unconditional branch. */
2542 if (offset_p)
2543 *offset_p = 0;
2544 if (size_p)
2545 *size_p = 4;
2546 p += emit_nop (p);
2547
2548 emit_ops_insns (buf, p - buf);
2549}
2550
2551/* Implementation of emit_ops method "write_goto_address". */
2552
2553static void
2554aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2555{
2556 uint32_t insn;
2557
2558 emit_b (&insn, 0, to - from);
2559 append_insns (&from, 1, &insn);
2560}
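
/* Usage sketch: the bytecode compiler records the offset/size pair
   filled in by aarch64_emit_if_goto or aarch64_emit_goto and, once the
   destination of the goto is known, patches the placeholder NOP:

     aarch64_write_goto_address (nop_addr, dest_addr, 4);

   overwriting the NOP with "B #(dest_addr - nop_addr)".  */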
2561
2562/* Implementation of emit_ops method "emit_const". */
2563
2564static void
2565aarch64_emit_const (LONGEST num)
2566{
2567 uint32_t buf[16];
2568 uint32_t *p = buf;
2569
2570 p += emit_mov_addr (p, x0, num);
2571
2572 emit_ops_insns (buf, p - buf);
2573}
2574
2575/* Implementation of emit_ops method "emit_call". */
2576
2577static void
2578aarch64_emit_call (CORE_ADDR fn)
2579{
2580 uint32_t buf[16];
2581 uint32_t *p = buf;
2582
2583 p += emit_mov_addr (p, ip0, fn);
2584 p += emit_blr (p, ip0);
2585
2586 emit_ops_insns (buf, p - buf);
2587}
2588
2589/* Implementation of emit_ops method "emit_reg". */
2590
2591static void
2592aarch64_emit_reg (int reg)
2593{
2594 uint32_t buf[16];
2595 uint32_t *p = buf;
2596
2597 /* Set x0 to unsigned char *regs. */
2598 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2599 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2600 p += emit_mov (p, x1, immediate_operand (reg));
2601
2602 emit_ops_insns (buf, p - buf);
2603
2604 aarch64_emit_call (get_raw_reg_func_addr ());
2605}
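
/* The helper called above lives in the in-process agent; its assumed
   prototype is

     ULONGEST get_raw_reg (const unsigned char *raw_regs, int regnum);

   so the x0/x1 set up just before the call land in its two parameters
   and the register value comes back in x0, our top of the stack.  */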
2606
2607/* Implementation of emit_ops method "emit_pop". */
2608
2609static void
2610aarch64_emit_pop (void)
2611{
2612 uint32_t buf[16];
2613 uint32_t *p = buf;
2614
2615 p += emit_pop (p, x0);
2616
2617 emit_ops_insns (buf, p - buf);
2618}
2619
2620/* Implementation of emit_ops method "emit_stack_flush". */
2621
2622static void
2623aarch64_emit_stack_flush (void)
2624{
2625 uint32_t buf[16];
2626 uint32_t *p = buf;
2627
2628 p += emit_push (p, x0);
2629
2630 emit_ops_insns (buf, p - buf);
2631}
2632
2633/* Implementation of emit_ops method "emit_zero_ext". */
2634
2635static void
2636aarch64_emit_zero_ext (int arg)
2637{
2638 uint32_t buf[16];
2639 uint32_t *p = buf;
2640
2641 p += emit_ubfx (p, x0, x0, 0, arg);
2642
2643 emit_ops_insns (buf, p - buf);
2644}
2645
2646/* Implementation of emit_ops method "emit_swap". */
2647
2648static void
2649aarch64_emit_swap (void)
2650{
2651 uint32_t buf[16];
2652 uint32_t *p = buf;
2653
2654 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2655 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2656 p += emit_mov (p, x0, register_operand (x1));
2657
2658 emit_ops_insns (buf, p - buf);
2659}
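
/* Example: with value A cached in x0 and B in the topmost stack cell,
   the three instructions above leave B in x0 and A in the cell.  */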
2660
2661/* Implementation of emit_ops method "emit_stack_adjust". */
2662
2663static void
2664aarch64_emit_stack_adjust (int n)
2665{
2666 /* Each stack cell is 16 bytes wide, so just move SP to discard N cells. */
2667 uint32_t buf[16];
2668 uint32_t *p = buf;
2669
2670 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2671
2672 emit_ops_insns (buf, p - buf);
2673}
2674
2675/* Implementation of emit_ops method "emit_int_call_1". */
2676
2677static void
2678aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2679{
2680 uint32_t buf[16];
2681 uint32_t *p = buf;
2682
2683 p += emit_mov (p, x0, immediate_operand (arg1));
2684
2685 emit_ops_insns (buf, p - buf);
2686
2687 aarch64_emit_call (fn);
2688}
2689
2690/* Implementation of emit_ops method "emit_void_call_2". */
2691
2692static void
2693aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2694{
2695 uint32_t buf[16];
2696 uint32_t *p = buf;
2697
2698 /* Push x0 on the stack. */
2699 aarch64_emit_stack_flush ();
2700
2701 /* Set up arguments for the function call:
2702
2703 x0: arg1
2704 x1: top of the stack
2705
2706 MOV x1, x0
2707 MOV x0, #arg1 */
2708
2709 p += emit_mov (p, x1, register_operand (x0));
2710 p += emit_mov (p, x0, immediate_operand (arg1));
2711
2712 emit_ops_insns (buf, p - buf);
2713
2714 aarch64_emit_call (fn);
2715
2716 /* Restore x0. */
2717 aarch64_emit_pop ();
2718}
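
/* This emit_ops method suits opcodes that call a C helper for its side
   effect, with the assumed shape "void fn (int arg1, ULONGEST top)":
   the constant argument travels in x0 and the (preserved) top of the
   stack in x1.  */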
2719
2720/* Implementation of emit_ops method "emit_eq_goto". */
2721
2722static void
2723aarch64_emit_eq_goto (int *offset_p, int *size_p)
2724{
2725 uint32_t buf[16];
2726 uint32_t *p = buf;
2727
2728 p += emit_pop (p, x1);
2729 p += emit_cmp (p, x1, register_operand (x0));
2730 /* Branch over the next instruction if x0 != x1. */
2731 p += emit_bcond (p, NE, 8);
2732 /* The NOP instruction will be patched with an unconditional branch. */
2733 if (offset_p)
2734 *offset_p = (p - buf) * 4;
2735 if (size_p)
2736 *size_p = 4;
2737 p += emit_nop (p);
2738
2739 emit_ops_insns (buf, p - buf);
2740}
2741
2742/* Implementation of emit_ops method "emit_ne_goto". */
2743
2744static void
2745aarch64_emit_ne_goto (int *offset_p, int *size_p)
2746{
2747 uint32_t buf[16];
2748 uint32_t *p = buf;
2749
2750 p += emit_pop (p, x1);
2751 p += emit_cmp (p, x1, register_operand (x0));
2752 /* Branch over the next instruction if x0 == x1. */
2753 p += emit_bcond (p, EQ, 8);
2754 /* The NOP instruction will be patched with an unconditional branch. */
2755 if (offset_p)
2756 *offset_p = (p - buf) * 4;
2757 if (size_p)
2758 *size_p = 4;
2759 p += emit_nop (p);
2760
2761 emit_ops_insns (buf, p - buf);
2762}
2763
2764/* Implementation of emit_ops method "emit_lt_goto". */
2765
2766static void
2767aarch64_emit_lt_goto (int *offset_p, int *size_p)
2768{
2769 uint32_t buf[16];
2770 uint32_t *p = buf;
2771
2772 p += emit_pop (p, x1);
2773 p += emit_cmp (p, x1, register_operand (x0));
2774 /* Branch over the next instruction if x1 >= x0. */
2775 p += emit_bcond (p, GE, 8);
2776 /* The NOP instruction will be patched with an unconditional branch. */
2777 if (offset_p)
2778 *offset_p = (p - buf) * 4;
2779 if (size_p)
2780 *size_p = 4;
2781 p += emit_nop (p);
2782
2783 emit_ops_insns (buf, p - buf);
2784}
2785
2786/* Implementation of emit_ops method "emit_le_goto". */
2787
2788static void
2789aarch64_emit_le_goto (int *offset_p, int *size_p)
2790{
2791 uint32_t buf[16];
2792 uint32_t *p = buf;
2793
2794 p += emit_pop (p, x1);
2795 p += emit_cmp (p, x1, register_operand (x0));
2796 /* Branch over the next instruction if x1 > x0. */
2797 p += emit_bcond (p, GT, 8);
2798 /* The NOP instruction will be patched with an unconditional branch. */
2799 if (offset_p)
2800 *offset_p = (p - buf) * 4;
2801 if (size_p)
2802 *size_p = 4;
2803 p += emit_nop (p);
2804
2805 emit_ops_insns (buf, p - buf);
2806}
2807
2808/* Implementation of emit_ops method "emit_gt_goto". */
2809
2810static void
2811aarch64_emit_gt_goto (int *offset_p, int *size_p)
2812{
2813 uint32_t buf[16];
2814 uint32_t *p = buf;
2815
2816 p += emit_pop (p, x1);
2817 p += emit_cmp (p, x1, register_operand (x0));
2818 /* Branch over the next instruction if x1 <= x0. */
2819 p += emit_bcond (p, LE, 8);
2820 /* The NOP instruction will be patched with an unconditional branch. */
2821 if (offset_p)
2822 *offset_p = (p - buf) * 4;
2823 if (size_p)
2824 *size_p = 4;
2825 p += emit_nop (p);
2826
2827 emit_ops_insns (buf, p - buf);
2828}
2829
2830/* Implementation of emit_ops method "emit_ge_goto". */
2831
2832static void
2833aarch64_emit_ge_goto (int *offset_p, int *size_p)
2834{
2835 uint32_t buf[16];
2836 uint32_t *p = buf;
2837
2838 p += emit_pop (p, x1);
2839 p += emit_cmp (p, x1, register_operand (x0));
2840 /* Branch over the next instruction if x1 < x0. */
2841 p += emit_bcond (p, LT, 8);
2842 /* The NOP instruction will be patched with an unconditional branch. */
2843 if (offset_p)
2844 *offset_p = (p - buf) * 4;
2845 if (size_p)
2846 *size_p = 4;
2847 p += emit_nop (p);
2848
2849 emit_ops_insns (buf, p - buf);
2850}
2851
2852static struct emit_ops aarch64_emit_ops_impl =
2853{
2854 aarch64_emit_prologue,
2855 aarch64_emit_epilogue,
2856 aarch64_emit_add,
2857 aarch64_emit_sub,
2858 aarch64_emit_mul,
2859 aarch64_emit_lsh,
2860 aarch64_emit_rsh_signed,
2861 aarch64_emit_rsh_unsigned,
2862 aarch64_emit_ext,
2863 aarch64_emit_log_not,
2864 aarch64_emit_bit_and,
2865 aarch64_emit_bit_or,
2866 aarch64_emit_bit_xor,
2867 aarch64_emit_bit_not,
2868 aarch64_emit_equal,
2869 aarch64_emit_less_signed,
2870 aarch64_emit_less_unsigned,
2871 aarch64_emit_ref,
2872 aarch64_emit_if_goto,
2873 aarch64_emit_goto,
2874 aarch64_write_goto_address,
2875 aarch64_emit_const,
2876 aarch64_emit_call,
2877 aarch64_emit_reg,
2878 aarch64_emit_pop,
2879 aarch64_emit_stack_flush,
2880 aarch64_emit_zero_ext,
2881 aarch64_emit_swap,
2882 aarch64_emit_stack_adjust,
2883 aarch64_emit_int_call_1,
2884 aarch64_emit_void_call_2,
2885 aarch64_emit_eq_goto,
2886 aarch64_emit_ne_goto,
2887 aarch64_emit_lt_goto,
2888 aarch64_emit_le_goto,
2889 aarch64_emit_gt_goto,
2890 aarch64_emit_ge_goto,
2891};
2892
2893/* Implementation of linux_target_ops method "emit_ops". */
2894
2895static struct emit_ops *
2896aarch64_emit_ops (void)
2897{
2898 return &aarch64_emit_ops_impl;
2899}
2900
2901/* Implementation of linux_target_ops method
2902 "get_min_fast_tracepoint_insn_len". */
2903
2904static int
2905aarch64_get_min_fast_tracepoint_insn_len (void)
2906{
2907 return 4;
2908}
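
/* Every AArch64 instruction is 4 bytes and the B instruction emitted
   at the tracepoint reaches +/-128 MiB, so a fast tracepoint only ever
   needs to overwrite a single instruction.  */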
2909
2910/* Implementation of linux_target_ops method "supports_range_stepping". */
2911
2912static int
2913aarch64_supports_range_stepping (void)
2914{
2915 return 1;
2916}
2917
2918struct linux_target_ops the_low_target =
2919{
2920 aarch64_arch_setup,
2921 aarch64_regs_info,
2922 aarch64_cannot_fetch_register,
2923 aarch64_cannot_store_register,
2924 NULL, /* fetch_register */
2925 aarch64_get_pc,
2926 aarch64_set_pc,
2927 (const unsigned char *) &aarch64_breakpoint,
2928 aarch64_breakpoint_len,
2929 NULL, /* breakpoint_reinsert_addr */
2930 0, /* decr_pc_after_break */
2931 aarch64_breakpoint_at,
2932 aarch64_supports_z_point_type,
2933 aarch64_insert_point,
2934 aarch64_remove_point,
2935 aarch64_stopped_by_watchpoint,
2936 aarch64_stopped_data_address,
2937 NULL, /* collect_ptrace_register */
2938 NULL, /* supply_ptrace_register */
2939 aarch64_linux_siginfo_fixup,
2940 aarch64_linux_new_process,
2941 aarch64_linux_new_thread,
2942 aarch64_linux_new_fork,
2943 aarch64_linux_prepare_to_resume,
2944 NULL, /* process_qsupported */
2945 aarch64_supports_tracepoints,
2946 aarch64_get_thread_area,
2947 aarch64_install_fast_tracepoint_jump_pad,
2948 aarch64_emit_ops,
2949 aarch64_get_min_fast_tracepoint_insn_len,
2950 aarch64_supports_range_stepping,
2951};
2952
2953void
2954initialize_low_arch (void)
2955{
2956 init_registers_aarch64 ();
2957
2958 initialize_low_arch_aarch32 ();
2959
2960 initialize_regsets_info (&aarch64_regsets_info);
2961}