/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
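
/* For illustration (the arithmetic, not spelled out above): with
   AARCH64_V0_REGNO == 34 and AARCH64_V_REGS_NUM == 32, FPSR is register
   number 66, FPCR is 67, and AARCH64_NUM_REGS comes out to 68.  */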

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
        debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
        debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
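
/* For illustration: these four bytes are the little-endian encoding of
   0xd4200000, which is BRK #0.  */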

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode length bit
             mask in hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode length bit
             mask in hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch
          && addr_trap < addr_watch + len)
        return addr_trap;
    }

  return (CORE_ADDR) 0;
}
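
/* For illustration: a 4-byte watchpoint with addr_watch == 0x1000 covers
   trap addresses 0x1000 through 0x1003, since the test above accepts
   addr_trap in [addr_watch, addr_watch + len).  */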

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
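
/* For illustration: the operand packs as op0:op1:crn:crm:op2, so
   TPIDR_EL0 above works out to 0x5e82 and NZCV to 0x5a10.  emit_mrs and
   emit_msr below place this 15-bit value at bit 5 of the instruction.  */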

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
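
/* For illustration: the fast tracepoint jump pad below pushes the
   collecting_t object with a pre-indexed pair store,

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

   and the matching pop would be a post-indexed load pair,
   emit_ldp (p, x0, x1, sp, postindex_memory_operand (16)).  */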

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}
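
/* For illustration, a sketch of how these three primitives combine into
   a spin lock (the jump pad below emits this pattern around the call to
   the collector):

     again:
       LDAXR x1, [lockaddr]     ; load-acquire the current lock value
       CBNZ  x1, again          ; lock is taken, try again
       STXR  w2, x0, [lockaddr] ; try to store our collecting_t address
       CBNZ  w2, again          ; exclusive store failed, try again
       ...                      ; collect
       STLR  xzr, [lockaddr]    ; store-release zero to unlock  */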

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

   MOV  xd, #(addr)
   MOVK xd, #(addr >> 16), lsl #16
   MOVK xd, #(addr >> 32), lsl #32
   MOVK xd, #(addr >> 48), lsl #48 */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
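
/* For illustration: with addr == 0x0000007fb7e01234, the sequence emitted
   by emit_mov_addr is

     MOV  xd, #0x1234
     MOVK xd, #0xb7e0, lsl #16
     MOVK xd, #0x7f, lsl #32

   and stops there, because (addr >> 48) is zero.  */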

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

   SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

   UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
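
/* For illustration: emit_ubfx (p, w0, w2, 16, 8) expands to
   UBFM w0, w2, #16, #23, extracting bits 23:16 of w2 into the low byte
   of w0 and zeroing the rest.  */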

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

   CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
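
/* For illustration: EQ is 0x0 and NE is 0x1, so emit_cset (p, x0, EQ)
   toggles the low bit and emits CSINC x0, xzr, xzr, NE, leaving 1 in x0
   exactly when the EQ condition held.  */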

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         B.COND TAKEN    ; If cond is true, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
bb903df0
PL
1785/* Implementation of linux_target_ops method
1786 "install_fast_tracepoint_jump_pad". */
1787
1788static int
1789aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1790 CORE_ADDR tpaddr,
1791 CORE_ADDR collector,
1792 CORE_ADDR lockaddr,
1793 ULONGEST orig_size,
1794 CORE_ADDR *jump_entry,
1795 CORE_ADDR *trampoline,
1796 ULONGEST *trampoline_size,
1797 unsigned char *jjump_pad_insn,
1798 ULONGEST *jjump_pad_insn_size,
1799 CORE_ADDR *adjusted_insn_addr,
1800 CORE_ADDR *adjusted_insn_addr_end,
1801 char *err)
1802{
1803 uint32_t buf[256];
1804 uint32_t *p = buf;
1805 int32_t offset;
1806 int i;
70b439f0 1807 uint32_t insn;
bb903df0 1808 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1809 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1810
1811 /* We need to save the current state on the stack both to restore it
1812 later and to collect register values when the tracepoint is hit.
1813
1814 The saved registers are pushed in a layout that needs to be in sync
1815 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1816 the supply_fast_tracepoint_registers function will fill in the
1817 register cache from a pointer to saved registers on the stack we build
1818 here.
1819
1820 For simplicity, we set the size of each cell on the stack to 16 bytes.
1821 This way one cell can hold any register type, from system registers
1822 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1823 has to be 16 bytes aligned anyway.
1824
1825 Note that the CPSR register does not exist on AArch64. Instead we
1826 can access system bits describing the process state with the
1827 MRS/MSR instructions, namely the condition flags. We save them as
1828 if they are part of a CPSR register because that's how GDB
1829 interprets these system bits. At the moment, only the condition
1830 flags are saved in CPSR (NZCV).
1831
1832 Stack layout, each cell is 16 bytes (descending):
1833
1834 High *-------- SIMD&FP registers from 31 down to 0. --------*
1835 | q31 |
1836 . .
1837 . . 32 cells
1838 . .
1839 | q0 |
1840 *---- General purpose registers from 30 down to 0. ----*
1841 | x30 |
1842 . .
1843 . . 31 cells
1844 . .
1845 | x0 |
1846 *------------- Special purpose registers. -------------*
1847 | SP |
1848 | PC |
1849 | CPSR (NZCV) | 5 cells
1850 | FPSR |
1851 | FPCR | <- SP + 16
1852 *------------- collecting_t object --------------------*
1853 | TPIDR_EL0 | struct tracepoint * |
1854 Low *------------------------------------------------------*
1855
1856 After this stack is set up, we issue a call to the collector, passing
1857 it the saved registers at (SP + 16). */
1858
1859 /* Push SIMD&FP registers on the stack:
1860
1861 SUB sp, sp, #(32 * 16)
1862
1863 STP q30, q31, [sp, #(30 * 16)]
1864 ...
1865 STP q0, q1, [sp]
1866
1867 */
1868 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1869 for (i = 30; i >= 0; i -= 2)
1870 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1871
1872   /* Push general purpose registers on the stack. Note that we do not need
1873 to push x31 as it represents the xzr register and not the stack
1874 pointer in a STR instruction.
1875
1876 SUB sp, sp, #(31 * 16)
1877
1878 STR x30, [sp, #(30 * 16)]
1879 ...
1880 STR x0, [sp]
1881
1882 */
1883 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1884 for (i = 30; i >= 0; i -= 1)
1885 p += emit_str (p, aarch64_register (i, 1), sp,
1886 offset_memory_operand (i * 16));
1887
1888 /* Make space for 5 more cells.
1889
1890 SUB sp, sp, #(5 * 16)
1891
1892 */
1893 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1894
1895
1896 /* Save SP:
1897
1898 ADD x4, sp, #((32 + 31 + 5) * 16)
1899 STR x4, [sp, #(4 * 16)]
1900
1901 */
1902 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1903 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1904
1905 /* Save PC (tracepoint address):
1906
1907 MOV x3, #(tpaddr)
1908 ...
1909
1910 STR x3, [sp, #(3 * 16)]
1911
1912 */
1913
1914 p += emit_mov_addr (p, x3, tpaddr);
1915 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1916
1917 /* Save CPSR (NZCV), FPSR and FPCR:
1918
1919 MRS x2, nzcv
1920 MRS x1, fpsr
1921 MRS x0, fpcr
1922
1923 STR x2, [sp, #(2 * 16)]
1924 STR x1, [sp, #(1 * 16)]
1925 STR x0, [sp, #(0 * 16)]
1926
1927 */
1928 p += emit_mrs (p, x2, NZCV);
1929 p += emit_mrs (p, x1, FPSR);
1930 p += emit_mrs (p, x0, FPCR);
1931 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1932 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1933 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1934
1935   /* Push the collecting_t object. It consists of the address of the
1936 tracepoint and an ID for the current thread. We get the latter by
1937 reading the tpidr_el0 system register. It corresponds to the
1938 NT_ARM_TLS register accessible with ptrace.
1939
1940 MOV x0, #(tpoint)
1941 ...
1942
1943 MRS x1, tpidr_el0
1944
1945 STP x0, x1, [sp, #-16]!
1946
1947 */
1948
1949 p += emit_mov_addr (p, x0, tpoint);
1950 p += emit_mrs (p, x1, TPIDR_EL0);
1951 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1952
1953 /* Spin-lock:
1954
1955 The shared memory for the lock is at lockaddr. It will hold zero
1956 if no-one is holding the lock, otherwise it contains the address of
1957 the collecting_t object on the stack of the thread which acquired it.
1958
1959 At this stage, the stack pointer points to this thread's collecting_t
1960 object.
1961
1962 We use the following registers:
1963 - x0: Address of the lock.
1964 - x1: Pointer to collecting_t object.
1965 - x2: Scratch register.
1966
1967 MOV x0, #(lockaddr)
1968 ...
1969 MOV x1, sp
1970
1971      ; Trigger an event local to this core, so that the first WFE
1972      ; below returns immediately instead of waiting.
1973 SEVL
1974 again:
1975 ; Wait for an event. The event is triggered by either the SEVL
1976 ; or STLR instructions (store release).
1977 WFE
1978
1979 ; Atomically read at lockaddr. This marks the memory location as
1980 ; exclusive. This instruction also has memory constraints which
1981 ; make sure all previous data reads and writes are done before
1982 ; executing it.
1983 LDAXR x2, [x0]
1984
1985 ; Try again if another thread holds the lock.
1986 CBNZ x2, again
1987
1988 ; We can lock it! Write the address of the collecting_t object.
1989 ; This instruction will fail if the memory location is not marked
1990 ; as exclusive anymore. If it succeeds, it will remove the
1991 ; exclusive mark on the memory location. This way, if another
1992 ; thread executes this instruction before us, we will fail and try
1993 ; all over again.
1994 STXR w2, x1, [x0]
1995 CBNZ w2, again
1996
1997 */
1998
1999 p += emit_mov_addr (p, x0, lockaddr);
2000 p += emit_mov (p, x1, register_operand (sp));
2001
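  /* The CBNZ byte offsets below are relative to the branch instruction
     itself: -2 * 4 and -4 * 4 both land on the WFE, i.e. the "again"
     label in the pseudo-code above.  */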
2002 p += emit_sevl (p);
2003 p += emit_wfe (p);
2004 p += emit_ldaxr (p, x2, x0);
2005   p += emit_cb (p, 1, x2, -2 * 4);
2006   p += emit_stxr (p, w2, x1, x0);
2007   p += emit_cb (p, 1, w2, -4 * 4);
2008
2009 /* Call collector (struct tracepoint *, unsigned char *):
2010
2011 MOV x0, #(tpoint)
2012 ...
2013
2014 ; Saved registers start after the collecting_t object.
2015 ADD x1, sp, #16
2016
2017 ; We use an intra-procedure-call scratch register.
2018 MOV ip0, #(collector)
2019 ...
2020
2021 ; And call back to C!
2022 BLR ip0
2023
2024 */
2025
2026 p += emit_mov_addr (p, x0, tpoint);
2027 p += emit_add (p, x1, sp, immediate_operand (16));
2028
2029 p += emit_mov_addr (p, ip0, collector);
2030 p += emit_blr (p, ip0);
2031
2032 /* Release the lock.
2033
2034 MOV x0, #(lockaddr)
2035 ...
2036
2037 ; This instruction is a normal store with memory ordering
2038 ; constraints. Thanks to this we do not have to put a data
2039      ; barrier instruction to make sure all data reads and writes are done
2040      ; before this instruction is executed. Furthermore, this instruction
2041 ; will trigger an event, letting other threads know they can grab
2042 ; the lock.
2043 STLR xzr, [x0]
2044
2045 */
2046 p += emit_mov_addr (p, x0, lockaddr);
2047 p += emit_stlr (p, xzr, x0);
2048
2049 /* Free collecting_t object:
2050
2051 ADD sp, sp, #16
2052
2053 */
2054 p += emit_add (p, sp, sp, immediate_operand (16));
2055
2056 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2057 registers from the stack.
2058
2059 LDR x2, [sp, #(2 * 16)]
2060 LDR x1, [sp, #(1 * 16)]
2061 LDR x0, [sp, #(0 * 16)]
2062
2063 MSR NZCV, x2
2064 MSR FPSR, x1
2065 MSR FPCR, x0
2066
2067      ADD sp, sp, #(5 * 16)
2068
2069 */
2070 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2071 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2072 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2073 p += emit_msr (p, NZCV, x2);
2074 p += emit_msr (p, FPSR, x1);
2075 p += emit_msr (p, FPCR, x0);
2076
2077 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2078
2079 /* Pop general purpose registers:
2080
2081 LDR x0, [sp]
2082 ...
2083 LDR x30, [sp, #(30 * 16)]
2084
2085 ADD sp, sp, #(31 * 16)
2086
2087 */
2088 for (i = 0; i <= 30; i += 1)
2089 p += emit_ldr (p, aarch64_register (i, 1), sp,
2090 offset_memory_operand (i * 16));
2091 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2092
2093 /* Pop SIMD&FP registers:
2094
2095 LDP q0, q1, [sp]
2096 ...
2097 LDP q30, q31, [sp, #(30 * 16)]
2098
2099 ADD sp, sp, #(32 * 16)
2100
2101 */
2102 for (i = 0; i <= 30; i += 2)
2103 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2104 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2105
2106 /* Write the code into the inferior memory. */
2107 append_insns (&buildaddr, p - buf, buf);
2108
2109 /* Now emit the relocated instruction. */
2110 *adjusted_insn_addr = buildaddr;
2111   target_read_uint32 (tpaddr, &insn);
2112
2113 insn_data.base.insn_addr = tpaddr;
2114 insn_data.new_addr = buildaddr;
2115 insn_data.insn_ptr = buf;
2116
2117 aarch64_relocate_instruction (insn, &visitor,
2118 (struct aarch64_insn_data *) &insn_data);
2119
2120   /* We may not have been able to relocate the instruction. */
2121   if (insn_data.insn_ptr == buf)
2122 {
2123 sprintf (err,
2124 "E.Could not relocate instruction from %s to %s.",
2125 core_addr_to_string_nz (tpaddr),
2126 core_addr_to_string_nz (buildaddr));
2127 return 1;
2128 }
2129   else
2130     append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2131   *adjusted_insn_addr_end = buildaddr;
2132
2133 /* Go back to the start of the buffer. */
2134 p = buf;
2135
2136 /* Emit a branch back from the jump pad. */
2137 offset = (tpaddr + orig_size - buildaddr);
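  /* An unconditional B encodes a signed 26-bit word immediate, i.e. a
     28-bit byte offset, giving a reach of +/-128 MB.  */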
2138 if (!can_encode_int32 (offset, 28))
2139 {
2140 sprintf (err,
2141 "E.Jump back from jump pad too far from tracepoint "
2142 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2143 offset);
2144 return 1;
2145 }
2146
2147 p += emit_b (p, 0, offset);
2148 append_insns (&buildaddr, p - buf, buf);
2149
2150 /* Give the caller a branch instruction into the jump pad. */
2151 offset = (*jump_entry - tpaddr);
2152 if (!can_encode_int32 (offset, 28))
2153 {
2154 sprintf (err,
2155 "E.Jump pad too far from tracepoint "
2156 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2157 offset);
2158 return 1;
2159 }
2160
2161 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2162 *jjump_pad_insn_size = 4;
2163
2164 /* Return the end address of our pad. */
2165 *jump_entry = buildaddr;
2166
2167 return 0;
2168}
2169
2170/* Helper function writing LEN instructions from START into
2171 current_insn_ptr. */
2172
2173static void
2174emit_ops_insns (const uint32_t *start, int len)
2175{
2176 CORE_ADDR buildaddr = current_insn_ptr;
2177
2178 if (debug_threads)
2179     debug_printf ("Adding %d instructions at %s\n",
2180 len, paddress (buildaddr));
2181
2182 append_insns (&buildaddr, len, start);
2183 current_insn_ptr = buildaddr;
2184}
2185
2186/* Pop a register from the stack. */
2187
2188static int
2189emit_pop (uint32_t *buf, struct aarch64_register rt)
2190{
2191 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2192}
2193
2194/* Push a register on the stack. */
2195
2196static int
2197emit_push (uint32_t *buf, struct aarch64_register rt)
2198{
2199 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2200}
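
/* Both helpers move SP by one 16-byte cell per value; as noted in the
   jump pad comment above, the stack pointer has to stay 16-byte
   aligned.  */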
2201
2202/* Implementation of emit_ops method "emit_prologue". */
2203
2204static void
2205aarch64_emit_prologue (void)
2206{
2207 uint32_t buf[16];
2208 uint32_t *p = buf;
2209
2210   /* This function emits a prologue for the following function prototype:
2211
2212 enum eval_result_type f (unsigned char *regs,
2213 ULONGEST *value);
2214
2215      The first argument is a buffer of raw registers. The
2216      second argument is the result of evaluating the
2217      expression, which will be set to whatever is on top of
2218      the stack at the end.
2219
2220 The stack set up by the prologue is as such:
2221
2222 High *------------------------------------------------------*
2223 | LR |
2224 | FP | <- FP
2225 | x1 (ULONGEST *value) |
2226 | x0 (unsigned char *regs) |
2227 Low *------------------------------------------------------*
2228
2229 As we are implementing a stack machine, each opcode can expand the
2230 stack so we never know how far we are from the data saved by this
2231      prologue. In order to be able to refer to value and regs later, we save
2232 the current stack pointer in the frame pointer. This way, it is not
2233 clobbered when calling C functions.
2234
2235      Finally, throughout every operation, we are using register x0 as the
2236 top of the stack, and x1 as a scratch register. */
2237
2238 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2239 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2240 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2241
2242 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2243
2244
2245 emit_ops_insns (buf, p - buf);
2246}
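
/* After this prologue, FP - 16 holds the saved "regs" argument and
   FP - 8 the saved "value" argument; aarch64_emit_reg and
   aarch64_emit_epilogue read them back at those fixed offsets.  */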
2247
2248/* Implementation of emit_ops method "emit_epilogue". */
2249
2250static void
2251aarch64_emit_epilogue (void)
2252{
2253 uint32_t buf[16];
2254 uint32_t *p = buf;
2255
2256 /* Store the result of the expression (x0) in *value. */
2257 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2258 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2259 p += emit_str (p, x0, x1, offset_memory_operand (0));
2260
2261 /* Restore the previous state. */
2262 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2263 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2264
2265 /* Return expr_eval_no_error. */
2266 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2267 p += emit_ret (p, lr);
2268
2269 emit_ops_insns (buf, p - buf);
2270}
2271
2272/* Implementation of emit_ops method "emit_add". */
2273
2274static void
2275aarch64_emit_add (void)
2276{
2277 uint32_t buf[16];
2278 uint32_t *p = buf;
2279
2280 p += emit_pop (p, x1);
2281 p += emit_add (p, x0, x0, register_operand (x1));
2282
2283 emit_ops_insns (buf, p - buf);
2284}
2285
2286/* Implementation of emit_ops method "emit_sub". */
2287
2288static void
2289aarch64_emit_sub (void)
2290{
2291 uint32_t buf[16];
2292 uint32_t *p = buf;
2293
2294 p += emit_pop (p, x1);
2295 p += emit_sub (p, x0, x0, register_operand (x1));
2296
2297 emit_ops_insns (buf, p - buf);
2298}
2299
2300/* Implementation of emit_ops method "emit_mul". */
2301
2302static void
2303aarch64_emit_mul (void)
2304{
2305 uint32_t buf[16];
2306 uint32_t *p = buf;
2307
2308 p += emit_pop (p, x1);
2309 p += emit_mul (p, x0, x1, x0);
2310
2311 emit_ops_insns (buf, p - buf);
2312}
2313
2314/* Implementation of emit_ops method "emit_lsh". */
2315
2316static void
2317aarch64_emit_lsh (void)
2318{
2319 uint32_t buf[16];
2320 uint32_t *p = buf;
2321
2322 p += emit_pop (p, x1);
2323 p += emit_lslv (p, x0, x1, x0);
2324
2325 emit_ops_insns (buf, p - buf);
2326}
2327
2328/* Implementation of emit_ops method "emit_rsh_signed". */
2329
2330static void
2331aarch64_emit_rsh_signed (void)
2332{
2333 uint32_t buf[16];
2334 uint32_t *p = buf;
2335
2336 p += emit_pop (p, x1);
2337 p += emit_asrv (p, x0, x1, x0);
2338
2339 emit_ops_insns (buf, p - buf);
2340}
2341
2342/* Implementation of emit_ops method "emit_rsh_unsigned". */
2343
2344static void
2345aarch64_emit_rsh_unsigned (void)
2346{
2347 uint32_t buf[16];
2348 uint32_t *p = buf;
2349
2350 p += emit_pop (p, x1);
2351 p += emit_lsrv (p, x0, x1, x0);
2352
2353 emit_ops_insns (buf, p - buf);
2354}
2355
2356/* Implementation of emit_ops method "emit_ext". */
2357
2358static void
2359aarch64_emit_ext (int arg)
2360{
2361 uint32_t buf[16];
2362 uint32_t *p = buf;
2363
2364 p += emit_sbfx (p, x0, x0, 0, arg);
2365
2366 emit_ops_insns (buf, p - buf);
2367}
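
/* For instance, with ARG == 8 the SBFX above sign-extends the low byte
   of x0 into the full 64-bit register.  */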
2368
2369/* Implementation of emit_ops method "emit_log_not". */
2370
2371static void
2372aarch64_emit_log_not (void)
2373{
2374 uint32_t buf[16];
2375 uint32_t *p = buf;
2376
2377 /* If the top of the stack is 0, replace it with 1. Else replace it with
2378 0. */
2379
2380 p += emit_cmp (p, x0, immediate_operand (0));
2381 p += emit_cset (p, x0, EQ);
2382
2383 emit_ops_insns (buf, p - buf);
2384}
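
/* CSET sets its destination to 1 when the condition holds and to 0
   otherwise, so the CMP #0 / CSET EQ pair computes logical negation
   without a branch.  */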
2385
2386/* Implementation of emit_ops method "emit_bit_and". */
2387
2388static void
2389aarch64_emit_bit_and (void)
2390{
2391 uint32_t buf[16];
2392 uint32_t *p = buf;
2393
2394 p += emit_pop (p, x1);
2395 p += emit_and (p, x0, x0, x1);
2396
2397 emit_ops_insns (buf, p - buf);
2398}
2399
2400/* Implementation of emit_ops method "emit_bit_or". */
2401
2402static void
2403aarch64_emit_bit_or (void)
2404{
2405 uint32_t buf[16];
2406 uint32_t *p = buf;
2407
2408 p += emit_pop (p, x1);
2409 p += emit_orr (p, x0, x0, x1);
2410
2411 emit_ops_insns (buf, p - buf);
2412}
2413
2414/* Implementation of emit_ops method "emit_bit_xor". */
2415
2416static void
2417aarch64_emit_bit_xor (void)
2418{
2419 uint32_t buf[16];
2420 uint32_t *p = buf;
2421
2422 p += emit_pop (p, x1);
2423 p += emit_eor (p, x0, x0, x1);
2424
2425 emit_ops_insns (buf, p - buf);
2426}
2427
2428/* Implementation of emit_ops method "emit_bit_not". */
2429
2430static void
2431aarch64_emit_bit_not (void)
2432{
2433 uint32_t buf[16];
2434 uint32_t *p = buf;
2435
2436 p += emit_mvn (p, x0, x0);
2437
2438 emit_ops_insns (buf, p - buf);
2439}
2440
2441/* Implementation of emit_ops method "emit_equal". */
2442
2443static void
2444aarch64_emit_equal (void)
2445{
2446 uint32_t buf[16];
2447 uint32_t *p = buf;
2448
2449 p += emit_pop (p, x1);
2450 p += emit_cmp (p, x0, register_operand (x1));
2451 p += emit_cset (p, x0, EQ);
2452
2453 emit_ops_insns (buf, p - buf);
2454}
2455
2456/* Implementation of emit_ops method "emit_less_signed". */
2457
2458static void
2459aarch64_emit_less_signed (void)
2460{
2461 uint32_t buf[16];
2462 uint32_t *p = buf;
2463
2464 p += emit_pop (p, x1);
2465 p += emit_cmp (p, x1, register_operand (x0));
2466 p += emit_cset (p, x0, LT);
2467
2468 emit_ops_insns (buf, p - buf);
2469}
2470
2471/* Implementation of emit_ops method "emit_less_unsigned". */
2472
2473static void
2474aarch64_emit_less_unsigned (void)
2475{
2476 uint32_t buf[16];
2477 uint32_t *p = buf;
2478
2479 p += emit_pop (p, x1);
2480 p += emit_cmp (p, x1, register_operand (x0));
2481 p += emit_cset (p, x0, LO);
2482
2483 emit_ops_insns (buf, p - buf);
2484}
2485
2486/* Implementation of emit_ops method "emit_ref". */
2487
2488static void
2489aarch64_emit_ref (int size)
2490{
2491 uint32_t buf[16];
2492 uint32_t *p = buf;
2493
2494 switch (size)
2495 {
2496 case 1:
2497 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2498 break;
2499 case 2:
2500 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2501 break;
2502 case 4:
2503 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2504 break;
2505 case 8:
2506 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2507 break;
2508 default:
2509 /* Unknown size, bail on compilation. */
2510 emit_error = 1;
2511 break;
2512 }
2513
2514 emit_ops_insns (buf, p - buf);
2515}
2516
2517/* Implementation of emit_ops method "emit_if_goto". */
2518
2519static void
2520aarch64_emit_if_goto (int *offset_p, int *size_p)
2521{
2522 uint32_t buf[16];
2523 uint32_t *p = buf;
2524
2525 /* The Z flag is set or cleared here. */
2526 p += emit_cmp (p, x0, immediate_operand (0));
2527 /* This instruction must not change the Z flag. */
2528 p += emit_pop (p, x0);
2529 /* Branch over the next instruction if x0 == 0. */
2530 p += emit_bcond (p, EQ, 8);
2531
2532 /* The NOP instruction will be patched with an unconditional branch. */
2533 if (offset_p)
2534 *offset_p = (p - buf) * 4;
2535 if (size_p)
2536 *size_p = 4;
2537 p += emit_nop (p);
2538
2539 emit_ops_insns (buf, p - buf);
2540}
2541
2542/* Implementation of emit_ops method "emit_goto". */
2543
2544static void
2545aarch64_emit_goto (int *offset_p, int *size_p)
2546{
2547 uint32_t buf[16];
2548 uint32_t *p = buf;
2549
2550 /* The NOP instruction will be patched with an unconditional branch. */
2551 if (offset_p)
2552 *offset_p = 0;
2553 if (size_p)
2554 *size_p = 4;
2555 p += emit_nop (p);
2556
2557 emit_ops_insns (buf, p - buf);
2558}
2559
2560/* Implementation of emit_ops method "write_goto_address". */
2561
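/* This patches the placeholder NOP emitted by aarch64_emit_goto or
   aarch64_emit_if_goto with an unconditional branch to TO.  */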
2562static void
2563aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2564{
2565 uint32_t insn;
2566
2567 emit_b (&insn, 0, to - from);
2568 append_insns (&from, 1, &insn);
2569}
2570
2571/* Implementation of emit_ops method "emit_const". */
2572
2573static void
2574aarch64_emit_const (LONGEST num)
2575{
2576 uint32_t buf[16];
2577 uint32_t *p = buf;
2578
2579 p += emit_mov_addr (p, x0, num);
2580
2581 emit_ops_insns (buf, p - buf);
2582}
2583
2584/* Implementation of emit_ops method "emit_call". */
2585
2586static void
2587aarch64_emit_call (CORE_ADDR fn)
2588{
2589 uint32_t buf[16];
2590 uint32_t *p = buf;
2591
2592 p += emit_mov_addr (p, ip0, fn);
2593 p += emit_blr (p, ip0);
2594
2595 emit_ops_insns (buf, p - buf);
2596}
2597
2598/* Implementation of emit_ops method "emit_reg". */
2599
2600static void
2601aarch64_emit_reg (int reg)
2602{
2603 uint32_t buf[16];
2604 uint32_t *p = buf;
2605
2606 /* Set x0 to unsigned char *regs. */
2607 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2608 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2609 p += emit_mov (p, x1, immediate_operand (reg));
2610
2611 emit_ops_insns (buf, p - buf);
2612
2613 aarch64_emit_call (get_raw_reg_func_addr ());
2614}
2615
2616/* Implementation of emit_ops method "emit_pop". */
2617
2618static void
2619aarch64_emit_pop (void)
2620{
2621 uint32_t buf[16];
2622 uint32_t *p = buf;
2623
2624 p += emit_pop (p, x0);
2625
2626 emit_ops_insns (buf, p - buf);
2627}
2628
2629/* Implementation of emit_ops method "emit_stack_flush". */
2630
2631static void
2632aarch64_emit_stack_flush (void)
2633{
2634 uint32_t buf[16];
2635 uint32_t *p = buf;
2636
2637 p += emit_push (p, x0);
2638
2639 emit_ops_insns (buf, p - buf);
2640}
2641
2642/* Implementation of emit_ops method "emit_zero_ext". */
2643
2644static void
2645aarch64_emit_zero_ext (int arg)
2646{
2647 uint32_t buf[16];
2648 uint32_t *p = buf;
2649
2650 p += emit_ubfx (p, x0, x0, 0, arg);
2651
2652 emit_ops_insns (buf, p - buf);
2653}
2654
2655/* Implementation of emit_ops method "emit_swap". */
2656
2657static void
2658aarch64_emit_swap (void)
2659{
2660 uint32_t buf[16];
2661 uint32_t *p = buf;
2662
2663 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2664 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2665 p += emit_mov (p, x0, register_operand (x1));
2666
2667 emit_ops_insns (buf, p - buf);
2668}
2669
2670/* Implementation of emit_ops method "emit_stack_adjust". */
2671
2672static void
2673aarch64_emit_stack_adjust (int n)
2674{
2675   /* Discard N entries from the stack; each entry occupies one
2676      16-byte cell. */
2676 uint32_t buf[16];
2677 uint32_t *p = buf;
2678
2679 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2680
2681 emit_ops_insns (buf, p - buf);
2682}
2683
2684/* Implementation of emit_ops method "emit_int_call_1". */
2685
2686static void
2687aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2688{
2689 uint32_t buf[16];
2690 uint32_t *p = buf;
2691
2692 p += emit_mov (p, x0, immediate_operand (arg1));
2693
2694 emit_ops_insns (buf, p - buf);
2695
2696 aarch64_emit_call (fn);
2697}
2698
2699/* Implementation of emit_ops method "emit_void_call_2". */
2700
2701static void
2702aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2703{
2704 uint32_t buf[16];
2705 uint32_t *p = buf;
2706
2707 /* Push x0 on the stack. */
2708 aarch64_emit_stack_flush ();
2709
2710 /* Setup arguments for the function call:
2711
2712 x0: arg1
2713 x1: top of the stack
2714
2715 MOV x1, x0
2716 MOV x0, #arg1 */
2717
2718 p += emit_mov (p, x1, register_operand (x0));
2719 p += emit_mov (p, x0, immediate_operand (arg1));
2720
2721 emit_ops_insns (buf, p - buf);
2722
2723 aarch64_emit_call (fn);
2724
2725 /* Restore x0. */
2726 aarch64_emit_pop ();
2727}
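
/* x0 is saved and restored around the call because it caches the top
   of the evaluation stack, as arranged by the prologue.  */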
2728
2729/* Implementation of emit_ops method "emit_eq_goto". */
2730
2731static void
2732aarch64_emit_eq_goto (int *offset_p, int *size_p)
2733{
2734 uint32_t buf[16];
2735 uint32_t *p = buf;
2736
2737 p += emit_pop (p, x1);
2738 p += emit_cmp (p, x1, register_operand (x0));
2739 /* Branch over the next instruction if x0 != x1. */
2740 p += emit_bcond (p, NE, 8);
2741 /* The NOP instruction will be patched with an unconditional branch. */
2742 if (offset_p)
2743 *offset_p = (p - buf) * 4;
2744 if (size_p)
2745 *size_p = 4;
2746 p += emit_nop (p);
2747
2748 emit_ops_insns (buf, p - buf);
2749}
2750
2751/* Implementation of emit_ops method "emit_ne_goto". */
2752
2753static void
2754aarch64_emit_ne_goto (int *offset_p, int *size_p)
2755{
2756 uint32_t buf[16];
2757 uint32_t *p = buf;
2758
2759 p += emit_pop (p, x1);
2760 p += emit_cmp (p, x1, register_operand (x0));
2761 /* Branch over the next instruction if x0 == x1. */
2762 p += emit_bcond (p, EQ, 8);
2763 /* The NOP instruction will be patched with an unconditional branch. */
2764 if (offset_p)
2765 *offset_p = (p - buf) * 4;
2766 if (size_p)
2767 *size_p = 4;
2768 p += emit_nop (p);
2769
2770 emit_ops_insns (buf, p - buf);
2771}
2772
2773/* Implementation of emit_ops method "emit_lt_goto". */
2774
2775static void
2776aarch64_emit_lt_goto (int *offset_p, int *size_p)
2777{
2778 uint32_t buf[16];
2779 uint32_t *p = buf;
2780
2781 p += emit_pop (p, x1);
2782 p += emit_cmp (p, x1, register_operand (x0));
2783 /* Branch over the next instruction if x0 >= x1. */
2784 p += emit_bcond (p, GE, 8);
2785 /* The NOP instruction will be patched with an unconditional branch. */
2786 if (offset_p)
2787 *offset_p = (p - buf) * 4;
2788 if (size_p)
2789 *size_p = 4;
2790 p += emit_nop (p);
2791
2792 emit_ops_insns (buf, p - buf);
2793}
2794
2795/* Implementation of emit_ops method "emit_le_goto". */
2796
2797static void
2798aarch64_emit_le_goto (int *offset_p, int *size_p)
2799{
2800 uint32_t buf[16];
2801 uint32_t *p = buf;
2802
2803 p += emit_pop (p, x1);
2804 p += emit_cmp (p, x1, register_operand (x0));
2805 /* Branch over the next instruction if x0 > x1. */
2806 p += emit_bcond (p, GT, 8);
2807 /* The NOP instruction will be patched with an unconditional branch. */
2808 if (offset_p)
2809 *offset_p = (p - buf) * 4;
2810 if (size_p)
2811 *size_p = 4;
2812 p += emit_nop (p);
2813
2814 emit_ops_insns (buf, p - buf);
2815}
2816
2817/* Implementation of emit_ops method "emit_gt_goto". */
2818
2819static void
2820aarch64_emit_gt_goto (int *offset_p, int *size_p)
2821{
2822 uint32_t buf[16];
2823 uint32_t *p = buf;
2824
2825 p += emit_pop (p, x1);
2826 p += emit_cmp (p, x1, register_operand (x0));
2827 /* Branch over the next instruction if x0 <= x1. */
2828 p += emit_bcond (p, LE, 8);
2829 /* The NOP instruction will be patched with an unconditional branch. */
2830 if (offset_p)
2831 *offset_p = (p - buf) * 4;
2832 if (size_p)
2833 *size_p = 4;
2834 p += emit_nop (p);
2835
2836 emit_ops_insns (buf, p - buf);
2837}
2838
2839/* Implementation of emit_ops method "emit_ge_goto". */
2840
2841static void
2842aarch64_emit_ge_goto (int *offset_p, int *size_p)
2843{
2844 uint32_t buf[16];
2845 uint32_t *p = buf;
2846
2847 p += emit_pop (p, x1);
2848 p += emit_cmp (p, x1, register_operand (x0));
2849   /* Branch over the next instruction if x0 < x1. */
2850 p += emit_bcond (p, LT, 8);
2851 /* The NOP instruction will be patched with an unconditional branch. */
2852 if (offset_p)
2853 *offset_p = (p - buf) * 4;
2854 if (size_p)
2855 *size_p = 4;
2856 p += emit_nop (p);
2857
2858 emit_ops_insns (buf, p - buf);
2859}
2860
2861static struct emit_ops aarch64_emit_ops_impl =
2862{
2863 aarch64_emit_prologue,
2864 aarch64_emit_epilogue,
2865 aarch64_emit_add,
2866 aarch64_emit_sub,
2867 aarch64_emit_mul,
2868 aarch64_emit_lsh,
2869 aarch64_emit_rsh_signed,
2870 aarch64_emit_rsh_unsigned,
2871 aarch64_emit_ext,
2872 aarch64_emit_log_not,
2873 aarch64_emit_bit_and,
2874 aarch64_emit_bit_or,
2875 aarch64_emit_bit_xor,
2876 aarch64_emit_bit_not,
2877 aarch64_emit_equal,
2878 aarch64_emit_less_signed,
2879 aarch64_emit_less_unsigned,
2880 aarch64_emit_ref,
2881 aarch64_emit_if_goto,
2882 aarch64_emit_goto,
2883 aarch64_write_goto_address,
2884 aarch64_emit_const,
2885 aarch64_emit_call,
2886 aarch64_emit_reg,
2887 aarch64_emit_pop,
2888 aarch64_emit_stack_flush,
2889 aarch64_emit_zero_ext,
2890 aarch64_emit_swap,
2891 aarch64_emit_stack_adjust,
2892 aarch64_emit_int_call_1,
2893 aarch64_emit_void_call_2,
2894 aarch64_emit_eq_goto,
2895 aarch64_emit_ne_goto,
2896 aarch64_emit_lt_goto,
2897 aarch64_emit_le_goto,
2898 aarch64_emit_gt_goto,
2899   aarch64_emit_ge_goto,
2900};
2901
2902/* Implementation of linux_target_ops method "emit_ops". */
2903
2904static struct emit_ops *
2905aarch64_emit_ops (void)
2906{
2907 return &aarch64_emit_ops_impl;
2908}
2909
2910/* Implementation of linux_target_ops method
2911 "get_min_fast_tracepoint_insn_len". */
2912
2913static int
2914aarch64_get_min_fast_tracepoint_insn_len (void)
2915{
2916 return 4;
2917}
2918
2919/* Implementation of linux_target_ops method "supports_range_stepping". */
2920
2921static int
2922aarch64_supports_range_stepping (void)
2923{
2924 return 1;
2925}
2926
2927/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2928
2929static const gdb_byte *
2930aarch64_sw_breakpoint_from_kind (int kind, int *size)
2931{
2932 if (is_64bit_tdesc ())
2933 {
2934 *size = aarch64_breakpoint_len;
2935 return aarch64_breakpoint;
2936 }
2937 else
2938 return arm_sw_breakpoint_from_kind (kind, size);
2939}
2940
2941/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2942
2943static int
2944aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2945{
2946 if (is_64bit_tdesc ())
2947 return aarch64_breakpoint_len;
2948 else
2949 return arm_breakpoint_kind_from_pc (pcptr);
2950}
2951
2952/* Implementation of the linux_target_ops method
2953 "breakpoint_kind_from_current_state". */
2954
2955static int
2956aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2957{
2958 if (is_64bit_tdesc ())
2959 return aarch64_breakpoint_len;
2960 else
2961 return arm_breakpoint_kind_from_current_state (pcptr);
2962}
2963
2964/* Support for hardware single step. */
2965
2966static int
2967aarch64_supports_hardware_single_step (void)
2968{
2969 return 1;
2970}
2971
2972struct linux_target_ops the_low_target =
2973{
2974 aarch64_arch_setup,
2975   aarch64_regs_info,
2976 aarch64_cannot_fetch_register,
2977 aarch64_cannot_store_register,
2978   NULL, /* fetch_register */
2979 aarch64_get_pc,
2980 aarch64_set_pc,
2981   aarch64_breakpoint_kind_from_pc,
2982   aarch64_sw_breakpoint_from_kind,
2983 NULL, /* breakpoint_reinsert_addr */
2984 0, /* decr_pc_after_break */
2985   aarch64_breakpoint_at,
2986   aarch64_supports_z_point_type,
2987 aarch64_insert_point,
2988 aarch64_remove_point,
2989 aarch64_stopped_by_watchpoint,
2990 aarch64_stopped_data_address,
2991 NULL, /* collect_ptrace_register */
2992 NULL, /* supply_ptrace_register */
2993   aarch64_linux_siginfo_fixup,
2994 aarch64_linux_new_process,
2995 aarch64_linux_new_thread,
2996   aarch64_linux_new_fork,
2997   aarch64_linux_prepare_to_resume,
2998   NULL, /* process_qsupported */
2999   aarch64_supports_tracepoints,
3000   aarch64_get_thread_area,
3001 aarch64_install_fast_tracepoint_jump_pad,
3002   aarch64_emit_ops,
3003   aarch64_get_min_fast_tracepoint_insn_len,
3004   aarch64_supports_range_stepping,
3005   aarch64_breakpoint_kind_from_current_state,
3006   aarch64_supports_hardware_single_step,
3007};
3008
3009void
3010initialize_low_arch (void)
3011{
3012 init_registers_aarch64 ();
3013
3014 initialize_low_arch_aarch32 ();
3015
3016 initialize_regsets_info (&aarch64_regsets_info);
3017}