Refactor queries for hardware and software single stepping support in GDBServer.

gdb/gdbserver/linux-aarch64-low.c

/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  gdb_byte insn[aarch64_breakpoint_len];

  (*the_target->read_memory) (where, (unsigned char *) &insn,
			      aarch64_breakpoint_len);
  if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
    return 1;

  return 0;
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
      {
	if (!extended_protocol && is_64bit_tdesc ())
	  {
	    /* Only enable the Z0 packet in non-multi-arch debugging.  If
	       the extended protocol is used, don't enable the Z0 packet
	       because GDBserver may attach to a 32-bit process.  */
	    return 1;
	  }
	else
	  {
	    /* Disable the Z0 packet so that GDBserver doesn't have to
	       handle different breakpoint instructions (aarch64, arm,
	       thumb etc) in multi-arch debugging.  */
	    return 0;
	  }
      }
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch
	  && addr_trap < addr_watch + len)
	return addr_trap;
    }

  return (CORE_ADDR) 0;
}
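
/* For example, with a single watchpoint covering the (hypothetical)
   range [0x1000, 0x1008), a hardware trap whose si_addr is 0x1004
   passes the range check above and 0x1004 is returned; a trap at
   0x1008 falls outside the watched range and yields 0.  */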

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm           op2 */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
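
/* For example, NZCV above packs op0=1, op1=3, crn=4, crm=2, op2=0 into
   the 15-bit system register field read by MRS below:

     (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0
       = 0x4000 | 0x1800 | 0x200 | 0x10
       = 0x5a10

   emit_mrs and emit_msr then place this value in bits 5..19 of the
   instruction via ENCODE (system_reg, 15, 5).  */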

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
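
/* For example, the classic frame push/pop pair can be emitted with the
   two helpers above (a sketch, assuming P points into a scratch
   uint32_t buffer):

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));
     ...
     p += emit_ldp (p, fp, lr, sp, postindex_memory_operand (16));

   which assembles to "stp x29, x30, [sp, #-16]!" and
   "ldp x29, x30, [sp], #16" respectively.  */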

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
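
/* Together, the exclusive load/store helpers above are enough to build
   a spin lock.  A sketch of an acquisition loop in the spirit of the
   jump pad below (assuming the lock address is in x0 and the new owner
   value in x1):

     again:
       LDAXR x2, [x0]
       CBNZ  x2, again    ; lock word non-zero: already owned, retry
       STXR  w2, x1, [x0]
       CBNZ  w2, again    ; lost the exclusive monitor, retry

   STLR, with its release semantics, is then the matching way to clear
   the lock word.  */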

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
1162
1163/* Write instructions into *BUF in order to move ADDR into a register.
1164 ADDR can be a 64-bit value.
1165
1166 This function will emit a series of MOV and MOVK instructions, such as:
1167
1168 MOV xd, #(addr)
1169 MOVK xd, #(addr >> 16), lsl #16
1170 MOVK xd, #(addr >> 32), lsl #32
1171 MOVK xd, #(addr >> 48), lsl #48 */
1172
1173static int
1174emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1175{
1176 uint32_t *p = buf;
1177
1178 /* The MOV (wide immediate) instruction clears to top bits of the
1179 register. */
1180 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1181
1182 if ((addr >> 16) != 0)
1183 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1184 else
1185 return p - buf;
1186
1187 if ((addr >> 32) != 0)
1188 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1189 else
1190 return p - buf;
1191
1192 if ((addr >> 48) != 0)
1193 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1194
1195 return p - buf;
1196}
1197
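
/* For example, loading the (hypothetical) address 0x0000007fb7ff2000
   into x0 stops after three instructions, because bits 48..63 are all
   zero:

     MOV  x0, #0x2000
     MOVK x0, #0xb7ff, lsl #16
     MOVK x0, #0x7f, lsl #32

   and emit_mov_addr returns 3, the number of uint32_t slots used.  */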

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

   SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

   UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
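
/* For example, extracting the second lowest byte of w2 into w0 (a
   sketch, with P pointing into a scratch buffer):

     p += emit_ubfx (p, w0, w2, 8, 8);

   assembles to "ubfx w0, w2, #8, #8", i.e. "ubfm w0, w2, #8, #15",
   leaving bits 8..15 of w2 zero-extended in w0.  */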

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction places rn in rd if the condition is true, and
   rm incremented by one otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

   CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
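
/* For example, materializing the boolean "x0 = (x2 == x3)" (a sketch,
   with P pointing into a scratch buffer):

     p += emit_cmp (p, x2, register_operand (x3));
     p += emit_cset (p, x0, EQ);

   which assembles to "cmp x2, x3" followed by "cset x0, eq".  */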

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
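
/* The emit_* helpers and append_insns combine into the usual pattern
   for generating code in the inferior (a sketch; ADDR and TO are
   hypothetical values, with TO naming a writable inferior address):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);
     p += emit_blr (p, x0);
     append_insns (&to, p - buf, buf);

   The jump pad code below uses exactly this pattern, with a larger
   buffer.  */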

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN           ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
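
/* This visitor is driven by aarch64_relocate_instruction (declared in
   arch/aarch64-insn.h).  A sketch of how the jump pad code feeds it a
   single instruction:

     insn_data.base.insn_addr = tpaddr;
     insn_data.new_addr = buildaddr;
     insn_data.insn_ptr = buf;

     aarch64_relocate_instruction (insn, &visitor,
                                   (struct aarch64_insn_data *) &insn_data);  */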

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int32_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;
1822 /* We need to save the current state on the stack both to restore it
1823 later and to collect register values when the tracepoint is hit.
1824
1825 The saved registers are pushed in a layout that needs to be in sync
1826 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1827 the supply_fast_tracepoint_registers function will fill in the
1828 register cache from a pointer to saved registers on the stack we build
1829 here.
1830
1831 For simplicity, we set the size of each cell on the stack to 16 bytes.
1832 This way one cell can hold any register type, from system registers
 1833 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
 1834 has to be 16-byte aligned anyway.
1835
1836 Note that the CPSR register does not exist on AArch64. Instead we
1837 can access system bits describing the process state with the
1838 MRS/MSR instructions, namely the condition flags. We save them as
1839 if they are part of a CPSR register because that's how GDB
1840 interprets these system bits. At the moment, only the condition
1841 flags are saved in CPSR (NZCV).
1842
1843 Stack layout, each cell is 16 bytes (descending):
1844
1845 High *-------- SIMD&FP registers from 31 down to 0. --------*
1846 | q31 |
1847 . .
1848 . . 32 cells
1849 . .
1850 | q0 |
1851 *---- General purpose registers from 30 down to 0. ----*
1852 | x30 |
1853 . .
1854 . . 31 cells
1855 . .
1856 | x0 |
1857 *------------- Special purpose registers. -------------*
1858 | SP |
1859 | PC |
1860 | CPSR (NZCV) | 5 cells
1861 | FPSR |
1862 | FPCR | <- SP + 16
1863 *------------- collecting_t object --------------------*
1864 | TPIDR_EL0 | struct tracepoint * |
1865 Low *------------------------------------------------------*
1866
1867 After this stack is set up, we issue a call to the collector, passing
1868 it the saved registers at (SP + 16). */
1869
1870 /* Push SIMD&FP registers on the stack:
1871
1872 SUB sp, sp, #(32 * 16)
1873
1874 STP q30, q31, [sp, #(30 * 16)]
1875 ...
1876 STP q0, q1, [sp]
1877
1878 */
1879 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1880 for (i = 30; i >= 0; i -= 2)
1881 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1882
 1883 /* Push general purpose registers on the stack. Note that we do not need
1884 to push x31 as it represents the xzr register and not the stack
1885 pointer in a STR instruction.
1886
1887 SUB sp, sp, #(31 * 16)
1888
1889 STR x30, [sp, #(30 * 16)]
1890 ...
1891 STR x0, [sp]
1892
1893 */
1894 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1895 for (i = 30; i >= 0; i -= 1)
1896 p += emit_str (p, aarch64_register (i, 1), sp,
1897 offset_memory_operand (i * 16));
1898
1899 /* Make space for 5 more cells.
1900
1901 SUB sp, sp, #(5 * 16)
1902
1903 */
1904 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1905
1906
1907 /* Save SP:
1908
1909 ADD x4, sp, #((32 + 31 + 5) * 16)
1910 STR x4, [sp, #(4 * 16)]
1911
1912 */
1913 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1914 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1915
1916 /* Save PC (tracepoint address):
1917
1918 MOV x3, #(tpaddr)
1919 ...
1920
1921 STR x3, [sp, #(3 * 16)]
1922
1923 */
1924
1925 p += emit_mov_addr (p, x3, tpaddr);
1926 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1927
1928 /* Save CPSR (NZCV), FPSR and FPCR:
1929
1930 MRS x2, nzcv
1931 MRS x1, fpsr
1932 MRS x0, fpcr
1933
1934 STR x2, [sp, #(2 * 16)]
1935 STR x1, [sp, #(1 * 16)]
1936 STR x0, [sp, #(0 * 16)]
1937
1938 */
1939 p += emit_mrs (p, x2, NZCV);
1940 p += emit_mrs (p, x1, FPSR);
1941 p += emit_mrs (p, x0, FPCR);
1942 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1943 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1944 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1945
 1946 /* Push the collecting_t object. It consists of the address of the
1947 tracepoint and an ID for the current thread. We get the latter by
 1948 reading the tpidr_el0 system register. It corresponds to the
 1949 NT_ARM_TLS register set accessible with ptrace.
1950
1951 MOV x0, #(tpoint)
1952 ...
1953
1954 MRS x1, tpidr_el0
1955
1956 STP x0, x1, [sp, #-16]!
1957
1958 */
1959
1960 p += emit_mov_addr (p, x0, tpoint);
1961 p += emit_mrs (p, x1, TPIDR_EL0);
1962 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
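  /* For reference, the pair pushed here mirrors gdbserver's collecting_t
     (a sketch; see tracepoint.c for the authoritative definition):

	struct collecting_t
	{
	  uintptr_t tpoint;	   - address of the tracepoint object
	  uintptr_t thread_area;   - TPIDR_EL0, identifying the thread
	};
  */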
1963
1964 /* Spin-lock:
1965
1966 The shared memory for the lock is at lockaddr. It will hold zero
1967 if no-one is holding the lock, otherwise it contains the address of
1968 the collecting_t object on the stack of the thread which acquired it.
1969
1970 At this stage, the stack pointer points to this thread's collecting_t
1971 object.
1972
1973 We use the following registers:
1974 - x0: Address of the lock.
1975 - x1: Pointer to collecting_t object.
1976 - x2: Scratch register.
1977
1978 MOV x0, #(lockaddr)
1979 ...
1980 MOV x1, sp
1981
 1982 ; Signal an event local to this core, so that the first WFE
 1983 ; below completes immediately instead of waiting.
1984 SEVL
1985 again:
1986 ; Wait for an event. The event is triggered by either the SEVL
1987 ; or STLR instructions (store release).
1988 WFE
1989
1990 ; Atomically read at lockaddr. This marks the memory location as
 1991 ; exclusive. This instruction also has acquire semantics: memory
 1992 ; accesses after it cannot be reordered to take place before the
 1993 ; lock is acquired.
1994 LDAXR x2, [x0]
1995
1996 ; Try again if another thread holds the lock.
1997 CBNZ x2, again
1998
1999 ; We can lock it! Write the address of the collecting_t object.
2000 ; This instruction will fail if the memory location is not marked
2001 ; as exclusive anymore. If it succeeds, it will remove the
2002 ; exclusive mark on the memory location. This way, if another
2003 ; thread executes this instruction before us, we will fail and try
2004 ; all over again.
2005 STXR w2, x1, [x0]
2006 CBNZ w2, again
2007
2008 */
2009
2010 p += emit_mov_addr (p, x0, lockaddr);
2011 p += emit_mov (p, x1, register_operand (sp));
2012
2013 p += emit_sevl (p);
2014 p += emit_wfe (p);
2015 p += emit_ldaxr (p, x2, x0);
 2016 p += emit_cb (p, 1, x2, -2 * 4);
2017 p += emit_stxr (p, w2, x1, x0);
 2018 p += emit_cb (p, 1, w2, -4 * 4);
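  /* The LDAXR/STXR sequence above is the standard AArch64
     exclusive-access idiom. A rough C equivalent, purely for
     illustration (lock and collecting are stand-ins for the lock word
     and our collecting_t object):

	uintptr_t expected = 0;
	while (!__atomic_compare_exchange_n (lock, &expected,
					     (uintptr_t) &collecting, 0,
					     __ATOMIC_ACQUIRE,
					     __ATOMIC_RELAXED))
	  expected = 0;
  */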
2019
2020 /* Call collector (struct tracepoint *, unsigned char *):
2021
2022 MOV x0, #(tpoint)
2023 ...
2024
2025 ; Saved registers start after the collecting_t object.
2026 ADD x1, sp, #16
2027
2028 ; We use an intra-procedure-call scratch register.
2029 MOV ip0, #(collector)
2030 ...
2031
2032 ; And call back to C!
2033 BLR ip0
2034
2035 */
2036
2037 p += emit_mov_addr (p, x0, tpoint);
2038 p += emit_add (p, x1, sp, immediate_operand (16));
2039
2040 p += emit_mov_addr (p, ip0, collector);
2041 p += emit_blr (p, ip0);
2042
2043 /* Release the lock.
2044
2045 MOV x0, #(lockaddr)
2046 ...
2047
2048 ; This instruction is a normal store with memory ordering
2049 ; constraints. Thanks to this we do not have to put a data
 2050 ; barrier instruction to make sure all data reads and writes are done
 2051 ; before this instruction is executed. Furthermore, this instruction
2052 ; will trigger an event, letting other threads know they can grab
2053 ; the lock.
2054 STLR xzr, [x0]
2055
2056 */
2057 p += emit_mov_addr (p, x0, lockaddr);
2058 p += emit_stlr (p, xzr, x0);
2059
2060 /* Free collecting_t object:
2061
2062 ADD sp, sp, #16
2063
2064 */
2065 p += emit_add (p, sp, sp, immediate_operand (16));
2066
2067 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2068 registers from the stack.
2069
2070 LDR x2, [sp, #(2 * 16)]
2071 LDR x1, [sp, #(1 * 16)]
2072 LDR x0, [sp, #(0 * 16)]
2073
2074 MSR NZCV, x2
2075 MSR FPSR, x1
2076 MSR FPCR, x0
2077
 2078 ADD sp, sp, #(5 * 16)
2079
2080 */
2081 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2082 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2083 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2084 p += emit_msr (p, NZCV, x2);
2085 p += emit_msr (p, FPSR, x1);
2086 p += emit_msr (p, FPCR, x0);
2087
2088 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2089
2090 /* Pop general purpose registers:
2091
2092 LDR x0, [sp]
2093 ...
2094 LDR x30, [sp, #(30 * 16)]
2095
2096 ADD sp, sp, #(31 * 16)
2097
2098 */
2099 for (i = 0; i <= 30; i += 1)
2100 p += emit_ldr (p, aarch64_register (i, 1), sp,
2101 offset_memory_operand (i * 16));
2102 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2103
2104 /* Pop SIMD&FP registers:
2105
2106 LDP q0, q1, [sp]
2107 ...
2108 LDP q30, q31, [sp, #(30 * 16)]
2109
2110 ADD sp, sp, #(32 * 16)
2111
2112 */
2113 for (i = 0; i <= 30; i += 2)
2114 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2115 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2116
2117 /* Write the code into the inferior memory. */
2118 append_insns (&buildaddr, p - buf, buf);
2119
2120 /* Now emit the relocated instruction. */
2121 *adjusted_insn_addr = buildaddr;
 2122 target_read_uint32 (tpaddr, &insn);
2123
2124 insn_data.base.insn_addr = tpaddr;
2125 insn_data.new_addr = buildaddr;
2126 insn_data.insn_ptr = buf;
2127
2128 aarch64_relocate_instruction (insn, &visitor,
2129 (struct aarch64_insn_data *) &insn_data);
2130
 2131 /* We may not have been able to relocate the instruction. */
 2132 if (insn_data.insn_ptr == buf)
2133 {
2134 sprintf (err,
2135 "E.Could not relocate instruction from %s to %s.",
2136 core_addr_to_string_nz (tpaddr),
2137 core_addr_to_string_nz (buildaddr));
2138 return 1;
2139 }
 2140 else
 2141 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
 2142 *adjusted_insn_addr_end = buildaddr;
2143
2144 /* Go back to the start of the buffer. */
2145 p = buf;
2146
2147 /* Emit a branch back from the jump pad. */
2148 offset = (tpaddr + orig_size - buildaddr);
2149 if (!can_encode_int32 (offset, 28))
2150 {
2151 sprintf (err,
2152 "E.Jump back from jump pad too far from tracepoint "
2153 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2154 offset);
2155 return 1;
2156 }
2157
2158 p += emit_b (p, 0, offset);
2159 append_insns (&buildaddr, p - buf, buf);
2160
2161 /* Give the caller a branch instruction into the jump pad. */
2162 offset = (*jump_entry - tpaddr);
2163 if (!can_encode_int32 (offset, 28))
2164 {
2165 sprintf (err,
2166 "E.Jump pad too far from tracepoint "
2167 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2168 offset);
2169 return 1;
2170 }
2171
2172 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2173 *jjump_pad_insn_size = 4;
2174
2175 /* Return the end address of our pad. */
2176 *jump_entry = buildaddr;
2177
2178 return 0;
2179}
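/* The finished jump pad is therefore: state save, lock acquisition, the
   call into the collector, lock release, state restore, the relocated
   original instruction, and a branch back to TPADDR + ORIG_SIZE.  */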
2180
2181/* Helper function writing LEN instructions from START into
2182 current_insn_ptr. */
2183
2184static void
2185emit_ops_insns (const uint32_t *start, int len)
2186{
2187 CORE_ADDR buildaddr = current_insn_ptr;
2188
2189 if (debug_threads)
 2190 debug_printf ("Adding %d instructions at %s\n",
2191 len, paddress (buildaddr));
2192
2193 append_insns (&buildaddr, len, start);
2194 current_insn_ptr = buildaddr;
2195}
2196
2197/* Pop a register from the stack. */
2198
2199static int
2200emit_pop (uint32_t *buf, struct aarch64_register rt)
2201{
2202 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2203}
2204
2205/* Push a register on the stack. */
2206
2207static int
2208emit_push (uint32_t *buf, struct aarch64_register rt)
2209{
2210 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2211}
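/* Each helper expands to a single post/pre-indexed transfer, keeping
   every stack-machine cell 16 bytes so that SP stays 16-byte aligned:

     emit_pop:   LDR xt, [sp], #16	; load, then SP += 16
     emit_push:  STR xt, [sp, #-16]!	; SP -= 16, then store
*/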
2212
2213/* Implementation of emit_ops method "emit_prologue". */
2214
2215static void
2216aarch64_emit_prologue (void)
2217{
2218 uint32_t buf[16];
2219 uint32_t *p = buf;
2220
 2221 /* This function emits a prologue for the following function prototype:
2222
2223 enum eval_result_type f (unsigned char *regs,
2224 ULONGEST *value);
2225
 2226 The first argument is a buffer of raw registers. The second
 2227 argument points to where the result of evaluating the expression
 2228 is stored; it is set to whatever is on top of the stack at the
 2229 end.
2230
2231 The stack set up by the prologue is as such:
2232
2233 High *------------------------------------------------------*
2234 | LR |
2235 | FP | <- FP
2236 | x1 (ULONGEST *value) |
2237 | x0 (unsigned char *regs) |
2238 Low *------------------------------------------------------*
2239
2240 As we are implementing a stack machine, each opcode can expand the
2241 stack so we never know how far we are from the data saved by this
 2242 prologue. In order to be able to refer to value and regs later, we save
2243 the current stack pointer in the frame pointer. This way, it is not
2244 clobbered when calling C functions.
2245
 2246 Finally, throughout every operation, we use register x0 as the
2247 top of the stack, and x1 as a scratch register. */
2248
2249 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2250 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2251 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2252
2253 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2254
2255
2256 emit_ops_insns (buf, p - buf);
2257}
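/* Assembled, the prologue above is simply (a sketch):

     STP x0, x1, [sp, #-32]!	; spill the regs/value arguments
     STR x30, [sp, #24]		; save LR
     STR x29, [sp, #16]		; save FP
     ADD x29, sp, #16		; FP now anchors the frame
*/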
2258
2259/* Implementation of emit_ops method "emit_epilogue". */
2260
2261static void
2262aarch64_emit_epilogue (void)
2263{
2264 uint32_t buf[16];
2265 uint32_t *p = buf;
2266
2267 /* Store the result of the expression (x0) in *value. */
2268 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2269 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2270 p += emit_str (p, x0, x1, offset_memory_operand (0));
2271
2272 /* Restore the previous state. */
2273 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2274 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2275
2276 /* Return expr_eval_no_error. */
2277 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2278 p += emit_ret (p, lr);
2279
2280 emit_ops_insns (buf, p - buf);
2281}
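/* And the matching epilogue (a sketch; expr_eval_no_error is 0):

     SUB x1, x29, #8		; address of the saved value pointer
     LDR x1, [x1]
     STR x0, [x1]		; *value = top of stack
     ADD sp, x29, #16		; drop the whole expression stack
     LDP x29, x30, [x29]	; restore the caller's FP and LR
     MOV x0, #0			; expr_eval_no_error
     RET
*/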
2282
2283/* Implementation of emit_ops method "emit_add". */
2284
2285static void
2286aarch64_emit_add (void)
2287{
2288 uint32_t buf[16];
2289 uint32_t *p = buf;
2290
2291 p += emit_pop (p, x1);
2292 p += emit_add (p, x0, x0, register_operand (x1));
2293
2294 emit_ops_insns (buf, p - buf);
2295}
2296
2297/* Implementation of emit_ops method "emit_sub". */
2298
2299static void
2300aarch64_emit_sub (void)
2301{
2302 uint32_t buf[16];
2303 uint32_t *p = buf;
2304
2305 p += emit_pop (p, x1);
2306 p += emit_sub (p, x0, x0, register_operand (x1));
2307
2308 emit_ops_insns (buf, p - buf);
2309}
2310
2311/* Implementation of emit_ops method "emit_mul". */
2312
2313static void
2314aarch64_emit_mul (void)
2315{
2316 uint32_t buf[16];
2317 uint32_t *p = buf;
2318
2319 p += emit_pop (p, x1);
2320 p += emit_mul (p, x0, x1, x0);
2321
2322 emit_ops_insns (buf, p - buf);
2323}
2324
2325/* Implementation of emit_ops method "emit_lsh". */
2326
2327static void
2328aarch64_emit_lsh (void)
2329{
2330 uint32_t buf[16];
2331 uint32_t *p = buf;
2332
2333 p += emit_pop (p, x1);
2334 p += emit_lslv (p, x0, x1, x0);
2335
2336 emit_ops_insns (buf, p - buf);
2337}
2338
2339/* Implementation of emit_ops method "emit_rsh_signed". */
2340
2341static void
2342aarch64_emit_rsh_signed (void)
2343{
2344 uint32_t buf[16];
2345 uint32_t *p = buf;
2346
2347 p += emit_pop (p, x1);
2348 p += emit_asrv (p, x0, x1, x0);
2349
2350 emit_ops_insns (buf, p - buf);
2351}
2352
2353/* Implementation of emit_ops method "emit_rsh_unsigned". */
2354
2355static void
2356aarch64_emit_rsh_unsigned (void)
2357{
2358 uint32_t buf[16];
2359 uint32_t *p = buf;
2360
2361 p += emit_pop (p, x1);
2362 p += emit_lsrv (p, x0, x1, x0);
2363
2364 emit_ops_insns (buf, p - buf);
2365}
2366
2367/* Implementation of emit_ops method "emit_ext". */
2368
2369static void
2370aarch64_emit_ext (int arg)
2371{
2372 uint32_t buf[16];
2373 uint32_t *p = buf;
2374
2375 p += emit_sbfx (p, x0, x0, 0, arg);
2376
2377 emit_ops_insns (buf, p - buf);
2378}
2379
2380/* Implementation of emit_ops method "emit_log_not". */
2381
2382static void
2383aarch64_emit_log_not (void)
2384{
2385 uint32_t buf[16];
2386 uint32_t *p = buf;
2387
2388 /* If the top of the stack is 0, replace it with 1. Else replace it with
2389 0. */
2390
2391 p += emit_cmp (p, x0, immediate_operand (0));
2392 p += emit_cset (p, x0, EQ);
2393
2394 emit_ops_insns (buf, p - buf);
2395}
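/* CMP followed by CSET is the recurring predicate idiom in these
   methods: the comparison sets the flags, then CSET writes 1 to the
   destination if the condition holds and 0 otherwise; here it computes
   x0 = (x0 == 0).  */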
2396
2397/* Implementation of emit_ops method "emit_bit_and". */
2398
2399static void
2400aarch64_emit_bit_and (void)
2401{
2402 uint32_t buf[16];
2403 uint32_t *p = buf;
2404
2405 p += emit_pop (p, x1);
2406 p += emit_and (p, x0, x0, x1);
2407
2408 emit_ops_insns (buf, p - buf);
2409}
2410
2411/* Implementation of emit_ops method "emit_bit_or". */
2412
2413static void
2414aarch64_emit_bit_or (void)
2415{
2416 uint32_t buf[16];
2417 uint32_t *p = buf;
2418
2419 p += emit_pop (p, x1);
2420 p += emit_orr (p, x0, x0, x1);
2421
2422 emit_ops_insns (buf, p - buf);
2423}
2424
2425/* Implementation of emit_ops method "emit_bit_xor". */
2426
2427static void
2428aarch64_emit_bit_xor (void)
2429{
2430 uint32_t buf[16];
2431 uint32_t *p = buf;
2432
2433 p += emit_pop (p, x1);
2434 p += emit_eor (p, x0, x0, x1);
2435
2436 emit_ops_insns (buf, p - buf);
2437}
2438
2439/* Implementation of emit_ops method "emit_bit_not". */
2440
2441static void
2442aarch64_emit_bit_not (void)
2443{
2444 uint32_t buf[16];
2445 uint32_t *p = buf;
2446
2447 p += emit_mvn (p, x0, x0);
2448
2449 emit_ops_insns (buf, p - buf);
2450}
2451
2452/* Implementation of emit_ops method "emit_equal". */
2453
2454static void
2455aarch64_emit_equal (void)
2456{
2457 uint32_t buf[16];
2458 uint32_t *p = buf;
2459
2460 p += emit_pop (p, x1);
2461 p += emit_cmp (p, x0, register_operand (x1));
2462 p += emit_cset (p, x0, EQ);
2463
2464 emit_ops_insns (buf, p - buf);
2465}
2466
2467/* Implementation of emit_ops method "emit_less_signed". */
2468
2469static void
2470aarch64_emit_less_signed (void)
2471{
2472 uint32_t buf[16];
2473 uint32_t *p = buf;
2474
2475 p += emit_pop (p, x1);
2476 p += emit_cmp (p, x1, register_operand (x0));
2477 p += emit_cset (p, x0, LT);
2478
2479 emit_ops_insns (buf, p - buf);
2480}
2481
2482/* Implementation of emit_ops method "emit_less_unsigned". */
2483
2484static void
2485aarch64_emit_less_unsigned (void)
2486{
2487 uint32_t buf[16];
2488 uint32_t *p = buf;
2489
2490 p += emit_pop (p, x1);
2491 p += emit_cmp (p, x1, register_operand (x0));
2492 p += emit_cset (p, x0, LO);
2493
2494 emit_ops_insns (buf, p - buf);
2495}
2496
2497/* Implementation of emit_ops method "emit_ref". */
2498
2499static void
2500aarch64_emit_ref (int size)
2501{
2502 uint32_t buf[16];
2503 uint32_t *p = buf;
2504
2505 switch (size)
2506 {
2507 case 1:
2508 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2509 break;
2510 case 2:
2511 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2512 break;
2513 case 4:
2514 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2515 break;
2516 case 8:
2517 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2518 break;
2519 default:
2520 /* Unknown size, bail on compilation. */
2521 emit_error = 1;
2522 break;
2523 }
2524
2525 emit_ops_insns (buf, p - buf);
2526}
2527
2528/* Implementation of emit_ops method "emit_if_goto". */
2529
2530static void
2531aarch64_emit_if_goto (int *offset_p, int *size_p)
2532{
2533 uint32_t buf[16];
2534 uint32_t *p = buf;
2535
2536 /* The Z flag is set or cleared here. */
2537 p += emit_cmp (p, x0, immediate_operand (0));
2538 /* This instruction must not change the Z flag. */
2539 p += emit_pop (p, x0);
2540 /* Branch over the next instruction if x0 == 0. */
2541 p += emit_bcond (p, EQ, 8);
2542
2543 /* The NOP instruction will be patched with an unconditional branch. */
2544 if (offset_p)
2545 *offset_p = (p - buf) * 4;
2546 if (size_p)
2547 *size_p = 4;
2548 p += emit_nop (p);
2549
2550 emit_ops_insns (buf, p - buf);
2551}
2552
2553/* Implementation of emit_ops method "emit_goto". */
2554
2555static void
2556aarch64_emit_goto (int *offset_p, int *size_p)
2557{
2558 uint32_t buf[16];
2559 uint32_t *p = buf;
2560
2561 /* The NOP instruction will be patched with an unconditional branch. */
2562 if (offset_p)
2563 *offset_p = 0;
2564 if (size_p)
2565 *size_p = 4;
2566 p += emit_nop (p);
2567
2568 emit_ops_insns (buf, p - buf);
2569}
2570
2571/* Implementation of emit_ops method "write_goto_address". */
2572
 2573static void
2574aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2575{
2576 uint32_t insn;
2577
2578 emit_b (&insn, 0, to - from);
2579 append_insns (&from, 1, &insn);
2580}
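/* The goto-emitting methods above leave a NOP placeholder and report
   its offset and size; once the destination is known, the bytecode
   compiler calls aarch64_write_goto_address to overwrite that NOP in
   place. For emit_if_goto the emitted skeleton is (a sketch):

     CMP x0, #0
     LDR x0, [sp], #16	; pop; does not touch the flags
     B.EQ +8		; skip the goto when the old top was zero
     NOP		; later patched to B <dest>
*/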
2581
2582/* Implementation of emit_ops method "emit_const". */
2583
2584static void
2585aarch64_emit_const (LONGEST num)
2586{
2587 uint32_t buf[16];
2588 uint32_t *p = buf;
2589
2590 p += emit_mov_addr (p, x0, num);
2591
2592 emit_ops_insns (buf, p - buf);
2593}
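/* emit_mov_addr materializes the constant 16 bits at a time with a
   MOV/MOVK sequence, so any 64-bit literal can be loaded without a
   constant pool.  */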
2594
2595/* Implementation of emit_ops method "emit_call". */
2596
2597static void
2598aarch64_emit_call (CORE_ADDR fn)
2599{
2600 uint32_t buf[16];
2601 uint32_t *p = buf;
2602
2603 p += emit_mov_addr (p, ip0, fn);
2604 p += emit_blr (p, ip0);
2605
2606 emit_ops_insns (buf, p - buf);
2607}
2608
2609/* Implementation of emit_ops method "emit_reg". */
2610
2611static void
2612aarch64_emit_reg (int reg)
2613{
2614 uint32_t buf[16];
2615 uint32_t *p = buf;
2616
2617 /* Set x0 to unsigned char *regs. */
2618 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2619 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2620 p += emit_mov (p, x1, immediate_operand (reg));
2621
2622 emit_ops_insns (buf, p - buf);
2623
2624 aarch64_emit_call (get_raw_reg_func_addr ());
2625}
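/* The call lands in the in-process agent's get_raw_reg, roughly
   ULONGEST get_raw_reg (const unsigned char *raw_regs, int regnum);
   its return value comes back in x0, the top of our stack machine.  */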
2626
2627/* Implementation of emit_ops method "emit_pop". */
2628
2629static void
2630aarch64_emit_pop (void)
2631{
2632 uint32_t buf[16];
2633 uint32_t *p = buf;
2634
2635 p += emit_pop (p, x0);
2636
2637 emit_ops_insns (buf, p - buf);
2638}
2639
2640/* Implementation of emit_ops method "emit_stack_flush". */
2641
2642static void
2643aarch64_emit_stack_flush (void)
2644{
2645 uint32_t buf[16];
2646 uint32_t *p = buf;
2647
2648 p += emit_push (p, x0);
2649
2650 emit_ops_insns (buf, p - buf);
2651}
2652
2653/* Implementation of emit_ops method "emit_zero_ext". */
2654
2655static void
2656aarch64_emit_zero_ext (int arg)
2657{
2658 uint32_t buf[16];
2659 uint32_t *p = buf;
2660
2661 p += emit_ubfx (p, x0, x0, 0, arg);
2662
2663 emit_ops_insns (buf, p - buf);
2664}
2665
2666/* Implementation of emit_ops method "emit_swap". */
2667
2668static void
2669aarch64_emit_swap (void)
2670{
2671 uint32_t buf[16];
2672 uint32_t *p = buf;
2673
2674 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2675 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2676 p += emit_mov (p, x0, register_operand (x1));
2677
2678 emit_ops_insns (buf, p - buf);
2679}
2680
2681/* Implementation of emit_ops method "emit_stack_adjust". */
2682
2683static void
2684aarch64_emit_stack_adjust (int n)
2685{
 2686 /* This is not strictly needed with our design, but implement it anyway. */
2687 uint32_t buf[16];
2688 uint32_t *p = buf;
2689
2690 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2691
2692 emit_ops_insns (buf, p - buf);
2693}
2694
2695/* Implementation of emit_ops method "emit_int_call_1". */
2696
2697static void
2698aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2699{
2700 uint32_t buf[16];
2701 uint32_t *p = buf;
2702
2703 p += emit_mov (p, x0, immediate_operand (arg1));
2704
2705 emit_ops_insns (buf, p - buf);
2706
2707 aarch64_emit_call (fn);
2708}
2709
2710/* Implementation of emit_ops method "emit_void_call_2". */
2711
2712static void
2713aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2714{
2715 uint32_t buf[16];
2716 uint32_t *p = buf;
2717
2718 /* Push x0 on the stack. */
2719 aarch64_emit_stack_flush ();
2720
2721 /* Setup arguments for the function call:
2722
2723 x0: arg1
2724 x1: top of the stack
2725
2726 MOV x1, x0
2727 MOV x0, #arg1 */
2728
2729 p += emit_mov (p, x1, register_operand (x0));
2730 p += emit_mov (p, x0, immediate_operand (arg1));
2731
2732 emit_ops_insns (buf, p - buf);
2733
2734 aarch64_emit_call (fn);
2735
2736 /* Restore x0. */
2737 aarch64_emit_pop ();
2738}
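/* This is how two-argument runtime helpers are reached; for example,
   setting a trace state variable passes the variable number as ARG1
   (in x0) while the value on top of the stack arrives in x1.  */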
2739
2740/* Implementation of emit_ops method "emit_eq_goto". */
2741
2742static void
2743aarch64_emit_eq_goto (int *offset_p, int *size_p)
2744{
2745 uint32_t buf[16];
2746 uint32_t *p = buf;
2747
2748 p += emit_pop (p, x1);
2749 p += emit_cmp (p, x1, register_operand (x0));
2750 /* Branch over the next instruction if x0 != x1. */
2751 p += emit_bcond (p, NE, 8);
2752 /* The NOP instruction will be patched with an unconditional branch. */
2753 if (offset_p)
2754 *offset_p = (p - buf) * 4;
2755 if (size_p)
2756 *size_p = 4;
2757 p += emit_nop (p);
2758
2759 emit_ops_insns (buf, p - buf);
2760}
2761
2762/* Implementation of emit_ops method "emit_ne_goto". */
2763
2764static void
2765aarch64_emit_ne_goto (int *offset_p, int *size_p)
2766{
2767 uint32_t buf[16];
2768 uint32_t *p = buf;
2769
2770 p += emit_pop (p, x1);
2771 p += emit_cmp (p, x1, register_operand (x0));
2772 /* Branch over the next instruction if x0 == x1. */
2773 p += emit_bcond (p, EQ, 8);
2774 /* The NOP instruction will be patched with an unconditional branch. */
2775 if (offset_p)
2776 *offset_p = (p - buf) * 4;
2777 if (size_p)
2778 *size_p = 4;
2779 p += emit_nop (p);
2780
2781 emit_ops_insns (buf, p - buf);
2782}
2783
2784/* Implementation of emit_ops method "emit_lt_goto". */
2785
2786static void
2787aarch64_emit_lt_goto (int *offset_p, int *size_p)
2788{
2789 uint32_t buf[16];
2790 uint32_t *p = buf;
2791
2792 p += emit_pop (p, x1);
2793 p += emit_cmp (p, x1, register_operand (x0));
2794 /* Branch over the next instruction if x0 >= x1. */
2795 p += emit_bcond (p, GE, 8);
2796 /* The NOP instruction will be patched with an unconditional branch. */
2797 if (offset_p)
2798 *offset_p = (p - buf) * 4;
2799 if (size_p)
2800 *size_p = 4;
2801 p += emit_nop (p);
2802
2803 emit_ops_insns (buf, p - buf);
2804}
2805
2806/* Implementation of emit_ops method "emit_le_goto". */
2807
2808static void
2809aarch64_emit_le_goto (int *offset_p, int *size_p)
2810{
2811 uint32_t buf[16];
2812 uint32_t *p = buf;
2813
2814 p += emit_pop (p, x1);
2815 p += emit_cmp (p, x1, register_operand (x0));
2816 /* Branch over the next instruction if x0 > x1. */
2817 p += emit_bcond (p, GT, 8);
2818 /* The NOP instruction will be patched with an unconditional branch. */
2819 if (offset_p)
2820 *offset_p = (p - buf) * 4;
2821 if (size_p)
2822 *size_p = 4;
2823 p += emit_nop (p);
2824
2825 emit_ops_insns (buf, p - buf);
2826}
2827
2828/* Implementation of emit_ops method "emit_gt_goto". */
2829
2830static void
2831aarch64_emit_gt_goto (int *offset_p, int *size_p)
2832{
2833 uint32_t buf[16];
2834 uint32_t *p = buf;
2835
2836 p += emit_pop (p, x1);
2837 p += emit_cmp (p, x1, register_operand (x0));
2838 /* Branch over the next instruction if x0 <= x1. */
2839 p += emit_bcond (p, LE, 8);
2840 /* The NOP instruction will be patched with an unconditional branch. */
2841 if (offset_p)
2842 *offset_p = (p - buf) * 4;
2843 if (size_p)
2844 *size_p = 4;
2845 p += emit_nop (p);
2846
2847 emit_ops_insns (buf, p - buf);
2848}
2849
 2850/* Implementation of emit_ops method "emit_ge_goto". */
2851
2852static void
2853aarch64_emit_ge_got (int *offset_p, int *size_p)
2854{
2855 uint32_t buf[16];
2856 uint32_t *p = buf;
2857
2858 p += emit_pop (p, x1);
2859 p += emit_cmp (p, x1, register_operand (x0));
 2860 /* Branch over the next instruction if x0 < x1. */
2861 p += emit_bcond (p, LT, 8);
2862 /* The NOP instruction will be patched with an unconditional branch. */
2863 if (offset_p)
2864 *offset_p = (p - buf) * 4;
2865 if (size_p)
2866 *size_p = 4;
2867 p += emit_nop (p);
2868
2869 emit_ops_insns (buf, p - buf);
2870}
2871
2872static struct emit_ops aarch64_emit_ops_impl =
2873{
2874 aarch64_emit_prologue,
2875 aarch64_emit_epilogue,
2876 aarch64_emit_add,
2877 aarch64_emit_sub,
2878 aarch64_emit_mul,
2879 aarch64_emit_lsh,
2880 aarch64_emit_rsh_signed,
2881 aarch64_emit_rsh_unsigned,
2882 aarch64_emit_ext,
2883 aarch64_emit_log_not,
2884 aarch64_emit_bit_and,
2885 aarch64_emit_bit_or,
2886 aarch64_emit_bit_xor,
2887 aarch64_emit_bit_not,
2888 aarch64_emit_equal,
2889 aarch64_emit_less_signed,
2890 aarch64_emit_less_unsigned,
2891 aarch64_emit_ref,
2892 aarch64_emit_if_goto,
2893 aarch64_emit_goto,
2894 aarch64_write_goto_address,
2895 aarch64_emit_const,
2896 aarch64_emit_call,
2897 aarch64_emit_reg,
2898 aarch64_emit_pop,
2899 aarch64_emit_stack_flush,
2900 aarch64_emit_zero_ext,
2901 aarch64_emit_swap,
2902 aarch64_emit_stack_adjust,
2903 aarch64_emit_int_call_1,
2904 aarch64_emit_void_call_2,
2905 aarch64_emit_eq_goto,
2906 aarch64_emit_ne_goto,
2907 aarch64_emit_lt_goto,
2908 aarch64_emit_le_goto,
2909 aarch64_emit_gt_goto,
2910 aarch64_emit_ge_got,
2911};
2912
2913/* Implementation of linux_target_ops method "emit_ops". */
2914
2915static struct emit_ops *
2916aarch64_emit_ops (void)
2917{
2918 return &aarch64_emit_ops_impl;
2919}
2920
2921/* Implementation of linux_target_ops method
2922 "get_min_fast_tracepoint_insn_len". */
2923
2924static int
2925aarch64_get_min_fast_tracepoint_insn_len (void)
2926{
2927 return 4;
2928}
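/* Four bytes because every AArch64 instruction is 32 bits wide: a fast
   tracepoint never needs to overwrite more than the single instruction
   that is replaced by the branch into the jump pad.  */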
2929
2930/* Implementation of linux_target_ops method "supports_range_stepping". */
2931
2932static int
2933aarch64_supports_range_stepping (void)
2934{
2935 return 1;
2936}
2937
2938/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2939
2940static const gdb_byte *
2941aarch64_sw_breakpoint_from_kind (int kind, int *size)
2942{
2943 *size = aarch64_breakpoint_len;
2944 return aarch64_breakpoint;
2945}
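/* aarch64_breakpoint is the 4-byte encoding of BRK #0 (0xd4200000),
   which traps to the kernel and is reported back to gdbserver as a
   SIGTRAP.  */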
2946
2947/* Support for hardware single step. */
2948
2949static int
2950aarch64_supports_hardware_single_step (void)
2951{
2952 return 1;
2953}
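/* Returning true lets the generic linux-low code implement single step
   with PTRACE_SINGLESTEP instead of falling back to software single
   step over breakpoints.  */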
2954
2955struct linux_target_ops the_low_target =
2956{
2957 aarch64_arch_setup,
 2958 aarch64_regs_info,
2959 aarch64_cannot_fetch_register,
2960 aarch64_cannot_store_register,
 2961 NULL, /* fetch_register */
2962 aarch64_get_pc,
2963 aarch64_set_pc,
2964 NULL, /* breakpoint_kind_from_pc */
2965 aarch64_sw_breakpoint_from_kind,
2966 NULL, /* breakpoint_reinsert_addr */
2967 0, /* decr_pc_after_break */
 2968 aarch64_breakpoint_at,
 2969 aarch64_supports_z_point_type,
2970 aarch64_insert_point,
2971 aarch64_remove_point,
2972 aarch64_stopped_by_watchpoint,
2973 aarch64_stopped_data_address,
2974 NULL, /* collect_ptrace_register */
2975 NULL, /* supply_ptrace_register */
 2976 aarch64_linux_siginfo_fixup,
2977 aarch64_linux_new_process,
2978 aarch64_linux_new_thread,
 2979 aarch64_linux_new_fork,
 2980 aarch64_linux_prepare_to_resume,
 2981 NULL, /* process_qsupported */
 2982 aarch64_supports_tracepoints,
2983 aarch64_get_thread_area,
2984 aarch64_install_fast_tracepoint_jump_pad,
 2985 aarch64_emit_ops,
 2986 aarch64_get_min_fast_tracepoint_insn_len,
 2987 aarch64_supports_range_stepping,
2988 NULL, /* breakpoint_kind_from_current_state */
2989 aarch64_supports_hardware_single_step,
 2990};
2991
2992void
2993initialize_low_arch (void)
2994{
2995 init_registers_aarch64 ();
2996
2997 initialize_low_arch_aarch32 ();
2998
2999 initialize_regsets_info (&aarch64_regsets_info);
3000}