[deliverable/binutils-gdb.git] / gdbserver / linux-aarch64-low.cc
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 };
59
60 /* The singleton target ops object. */
61
62 static aarch64_target the_aarch64_target;
63
64 /* Per-process arch-specific data we want to keep. */
65
66 struct arch_process_info
67 {
68 /* Hardware breakpoint/watchpoint data.
69 The reason for them to be per-process rather than per-thread is
70 due to the lack of information in the gdbserver environment;
71 gdbserver is not told whether a requested hardware
72 breakpoint/watchpoint is thread specific or not, so it has to set
73 each hw bp/wp for every thread in the current process. The
74 higher level bp/wp management in gdb will resume a thread if a hw
75 bp/wp trap is not expected for it. Since the hw bp/wp setting is the
76 same for each thread, it is reasonable for the data to live here.
77 */
78 struct aarch64_debug_reg_state debug_reg_state;
79 };
80
81 /* Return true if the size of register 0 is 8 bytes. */
82
83 static int
84 is_64bit_tdesc (void)
85 {
86 struct regcache *regcache = get_thread_regcache (current_thread, 0);
87
88 return register_size (regcache->tdesc, 0) == 8;
89 }
90
91 /* Return true if the target description in the regcache contains the SVE feature. */
92
93 static bool
94 is_sve_tdesc (void)
95 {
96 struct regcache *regcache = get_thread_regcache (current_thread, 0);
97
98 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
99 }
100
101 static void
102 aarch64_fill_gregset (struct regcache *regcache, void *buf)
103 {
104 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
105 int i;
106
107 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
108 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
109 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
110 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
111 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
112 }
113
114 static void
115 aarch64_store_gregset (struct regcache *regcache, const void *buf)
116 {
117 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
118 int i;
119
120 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
121 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
122 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
123 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
124 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
125 }
126
127 static void
128 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
129 {
130 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
131 int i;
132
133 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
134 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
135 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
136 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
137 }
138
139 static void
140 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
141 {
142 const struct user_fpsimd_state *regset
143 = (const struct user_fpsimd_state *) buf;
144 int i;
145
146 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
147 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
148 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
149 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
150 }
151
152 /* Store the pauth registers to regcache. */
153
154 static void
155 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
156 {
157 uint64_t *pauth_regset = (uint64_t *) buf;
158 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
159
160 if (pauth_base == 0)
161 return;
162
163 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
164 &pauth_regset[0]);
165 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
166 &pauth_regset[1]);
167 }
168
169 /* Implementation of linux_target_ops method "get_pc". */
170
171 static CORE_ADDR
172 aarch64_get_pc (struct regcache *regcache)
173 {
174 if (register_size (regcache->tdesc, 0) == 8)
175 return linux_get_pc_64bit (regcache);
176 else
177 return linux_get_pc_32bit (regcache);
178 }
179
180 /* Implementation of linux_target_ops method "set_pc". */
181
182 static void
183 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
184 {
185 if (register_size (regcache->tdesc, 0) == 8)
186 linux_set_pc_64bit (regcache, pc);
187 else
188 linux_set_pc_32bit (regcache, pc);
189 }
190
191 #define aarch64_breakpoint_len 4
192
193 /* AArch64 BRK software debug mode instruction.
194 This instruction needs to match gdb/aarch64-tdep.c
195 (aarch64_default_breakpoint). */
196 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
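/* Illustrative note (addition, not from the original sources): the four
   bytes above are the little-endian encoding of the A64 instruction
   BRK #0, whose 32-bit opcode is 0xd4200000; laid out in memory it reads
   00 00 20 d4, which is exactly what aarch64_breakpoint_at compares the
   target memory against below.  */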
197
198 /* Implementation of linux_target_ops method "breakpoint_at". */
199
200 static int
201 aarch64_breakpoint_at (CORE_ADDR where)
202 {
203 if (is_64bit_tdesc ())
204 {
205 gdb_byte insn[aarch64_breakpoint_len];
206
207 the_target->read_memory (where, (unsigned char *) &insn,
208 aarch64_breakpoint_len);
209 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
210 return 1;
211
212 return 0;
213 }
214 else
215 return arm_breakpoint_at (where);
216 }
217
218 static void
219 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
220 {
221 int i;
222
223 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
224 {
225 state->dr_addr_bp[i] = 0;
226 state->dr_ctrl_bp[i] = 0;
227 state->dr_ref_count_bp[i] = 0;
228 }
229
230 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
231 {
232 state->dr_addr_wp[i] = 0;
233 state->dr_ctrl_wp[i] = 0;
234 state->dr_ref_count_wp[i] = 0;
235 }
236 }
237
238 /* Return the pointer to the debug register state structure in the
239 current process' arch-specific data area. */
240
241 struct aarch64_debug_reg_state *
242 aarch64_get_debug_reg_state (pid_t pid)
243 {
244 struct process_info *proc = find_process_pid (pid);
245
246 return &proc->priv->arch_private->debug_reg_state;
247 }
248
249 /* Implementation of linux_target_ops method "supports_z_point_type". */
250
251 static int
252 aarch64_supports_z_point_type (char z_type)
253 {
254 switch (z_type)
255 {
256 case Z_PACKET_SW_BP:
257 case Z_PACKET_HW_BP:
258 case Z_PACKET_WRITE_WP:
259 case Z_PACKET_READ_WP:
260 case Z_PACKET_ACCESS_WP:
261 return 1;
262 default:
263 return 0;
264 }
265 }
266
267 /* Implementation of linux_target_ops method "insert_point".
268
269 It actually only records the info of the to-be-inserted bp/wp;
270 the actual insertion will happen when threads are resumed. */
271
272 static int
273 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
274 int len, struct raw_breakpoint *bp)
275 {
276 int ret;
277 enum target_hw_bp_type targ_type;
278 struct aarch64_debug_reg_state *state
279 = aarch64_get_debug_reg_state (pid_of (current_thread));
280
281 if (show_debug_regs)
282 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
283 (unsigned long) addr, len);
284
285 /* Determine the type from the raw breakpoint type. */
286 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
287
288 if (targ_type != hw_execute)
289 {
290 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
291 ret = aarch64_handle_watchpoint (targ_type, addr, len,
292 1 /* is_insert */, state);
293 else
294 ret = -1;
295 }
296 else
297 {
298 if (len == 3)
299 {
300 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
301 instruction. Set it to 2 to correctly encode the length bit
302 mask in the hardware breakpoint/watchpoint control register. */
303 len = 2;
304 }
305 ret = aarch64_handle_breakpoint (targ_type, addr, len,
306 1 /* is_insert */, state);
307 }
308
309 if (show_debug_regs)
310 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
311 targ_type);
312
313 return ret;
314 }
315
316 /* Implementation of linux_target_ops method "remove_point".
317
318 It actually only records the info of the to-be-removed bp/wp,
319 the actual removal will be done when threads are resumed. */
320
321 static int
322 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
323 int len, struct raw_breakpoint *bp)
324 {
325 int ret;
326 enum target_hw_bp_type targ_type;
327 struct aarch64_debug_reg_state *state
328 = aarch64_get_debug_reg_state (pid_of (current_thread));
329
330 if (show_debug_regs)
331 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
332 (unsigned long) addr, len);
333
334 /* Determine the type from the raw breakpoint type. */
335 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
336
337 /* Set up state pointers. */
338 if (targ_type != hw_execute)
339 ret =
340 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
341 state);
342 else
343 {
344 if (len == 3)
345 {
346 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
347 instruction. Set it to 2 to correctly encode the length bit
348 mask in the hardware breakpoint/watchpoint control register. */
349 len = 2;
350 }
351 ret = aarch64_handle_breakpoint (targ_type, addr, len,
352 0 /* is_insert */, state);
353 }
354
355 if (show_debug_regs)
356 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
357 targ_type);
358
359 return ret;
360 }
361
362 /* Implementation of linux_target_ops method "stopped_data_address". */
363
364 static CORE_ADDR
365 aarch64_stopped_data_address (void)
366 {
367 siginfo_t siginfo;
368 int pid, i;
369 struct aarch64_debug_reg_state *state;
370
371 pid = lwpid_of (current_thread);
372
373 /* Get the siginfo. */
374 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
375 return (CORE_ADDR) 0;
376
377 /* Need to be a hardware breakpoint/watchpoint trap. */
378 if (siginfo.si_signo != SIGTRAP
379 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
380 return (CORE_ADDR) 0;
381
382 /* Check if the address matches any watched address. */
383 state = aarch64_get_debug_reg_state (pid_of (current_thread));
384 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
385 {
386 const unsigned int offset
387 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
388 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
389 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
390 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
391 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
392 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
393
394 if (state->dr_ref_count_wp[i]
395 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
396 && addr_trap >= addr_watch_aligned
397 && addr_trap < addr_watch + len)
398 {
399 /* ADDR_TRAP reports the first address of the memory range
400 accessed by the CPU, regardless of what was the memory
401 range watched. Thus, a large CPU access that straddles
402 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
403 ADDR_TRAP that is lower than the
404 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
405
406 addr: | 4 | 5 | 6 | 7 | 8 |
407 |---- range watched ----|
408 |----------- range accessed ------------|
409
410 In this case, ADDR_TRAP will be 4.
411
412 To match a watchpoint known to GDB core, we must never
413 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
414 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
415 positive on kernels older than 4.10. See PR
416 external/20207. */
417 return addr_orig;
418 }
419 }
420
421 return (CORE_ADDR) 0;
422 }
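/* Worked example (addition, hypothetical values): assume slot I holds
   dr_addr_wp[i] = 0x1004 with a byte-select offset of 2 and a length of
   4, i.e. GDB asked to watch 0x1006..0x1009 and dr_addr_orig_wp[i] is
   0x1006.  Then addr_watch = 0x1006 and addr_watch_aligned = 0x1000.  A
   16-byte CPU access starting at 0x1000 that touches the watched bytes
   reports si_addr = 0x1000, which is below addr_watch but not below
   addr_watch_aligned, so the test above still matches and the original
   address 0x1006 is what gets reported back to GDB.  */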
423
424 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
425
426 static int
427 aarch64_stopped_by_watchpoint (void)
428 {
429 if (aarch64_stopped_data_address () != 0)
430 return 1;
431 else
432 return 0;
433 }
434
435 /* Fetch the thread-local storage pointer for libthread_db. */
436
437 ps_err_e
438 ps_get_thread_area (struct ps_prochandle *ph,
439 lwpid_t lwpid, int idx, void **base)
440 {
441 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
442 is_64bit_tdesc ());
443 }
444
445 /* Implementation of linux_target_ops method "siginfo_fixup". */
446
447 static int
448 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
449 {
450 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
451 if (!is_64bit_tdesc ())
452 {
453 if (direction == 0)
454 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
455 native);
456 else
457 aarch64_siginfo_from_compat_siginfo (native,
458 (struct compat_siginfo *) inf);
459
460 return 1;
461 }
462
463 return 0;
464 }
465
466 /* Implementation of linux_target_ops method "new_process". */
467
468 static struct arch_process_info *
469 aarch64_linux_new_process (void)
470 {
471 struct arch_process_info *info = XCNEW (struct arch_process_info);
472
473 aarch64_init_debug_reg_state (&info->debug_reg_state);
474
475 return info;
476 }
477
478 /* Implementation of linux_target_ops method "delete_process". */
479
480 static void
481 aarch64_linux_delete_process (struct arch_process_info *info)
482 {
483 xfree (info);
484 }
485
486 /* Implementation of linux_target_ops method "linux_new_fork". */
487
488 static void
489 aarch64_linux_new_fork (struct process_info *parent,
490 struct process_info *child)
491 {
492 /* These are allocated by linux_add_process. */
493 gdb_assert (parent->priv != NULL
494 && parent->priv->arch_private != NULL);
495 gdb_assert (child->priv != NULL
496 && child->priv->arch_private != NULL);
497
498 /* Linux kernels before 2.6.33 commit
499 72f674d203cd230426437cdcf7dd6f681dad8b0d
500 make the child inherit hardware debug registers from the parent
501 on fork/vfork/clone. Newer Linux kernels create such tasks with
502 zeroed debug registers.
503
504 GDB core assumes the child inherits the watchpoints/hw
505 breakpoints of the parent, and will remove them all from the
506 forked-off process. Copy the debug register mirrors into the
507 new process so that all breakpoints and watchpoints can be
508 removed together. The debug register mirrors will be zeroed
509 out before detaching the forked-off process, thus making
510 this compatible with older Linux kernels too. */
511
512 *child->priv->arch_private = *parent->priv->arch_private;
513 }
514
515 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
516 #define AARCH64_HWCAP_PACA (1 << 30)
517
518 /* Implementation of linux_target_ops method "arch_setup". */
519
520 static void
521 aarch64_arch_setup (void)
522 {
523 unsigned int machine;
524 int is_elf64;
525 int tid;
526
527 tid = lwpid_of (current_thread);
528
529 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
530
531 if (is_elf64)
532 {
533 uint64_t vq = aarch64_sve_get_vq (tid);
534 unsigned long hwcap = linux_get_hwcap (8);
535 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
536
537 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
538 }
539 else
540 current_process ()->tdesc = aarch32_linux_read_description ();
541
542 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
543 }
544
545 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
546
547 static void
548 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
549 {
550 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
551 }
552
553 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
554
555 static void
556 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
557 {
558 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
559 }
560
561 static struct regset_info aarch64_regsets[] =
562 {
563 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
564 sizeof (struct user_pt_regs), GENERAL_REGS,
565 aarch64_fill_gregset, aarch64_store_gregset },
566 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
567 sizeof (struct user_fpsimd_state), FP_REGS,
568 aarch64_fill_fpregset, aarch64_store_fpregset
569 },
570 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
571 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
572 NULL, aarch64_store_pauthregset },
573 NULL_REGSET
574 };
575
576 static struct regsets_info aarch64_regsets_info =
577 {
578 aarch64_regsets, /* regsets */
579 0, /* num_regsets */
580 NULL, /* disabled_regsets */
581 };
582
583 static struct regs_info regs_info_aarch64 =
584 {
585 NULL, /* regset_bitmap */
586 NULL, /* usrregs */
587 &aarch64_regsets_info,
588 };
589
590 static struct regset_info aarch64_sve_regsets[] =
591 {
592 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
593 sizeof (struct user_pt_regs), GENERAL_REGS,
594 aarch64_fill_gregset, aarch64_store_gregset },
595 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
596 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
597 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
598 },
599 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
600 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
601 NULL, aarch64_store_pauthregset },
602 NULL_REGSET
603 };
604
605 static struct regsets_info aarch64_sve_regsets_info =
606 {
607 aarch64_sve_regsets, /* regsets. */
608 0, /* num_regsets. */
609 NULL, /* disabled_regsets. */
610 };
611
612 static struct regs_info regs_info_aarch64_sve =
613 {
614 NULL, /* regset_bitmap. */
615 NULL, /* usrregs. */
616 &aarch64_sve_regsets_info,
617 };
618
619 /* Implementation of linux_target_ops method "regs_info". */
620
621 static const struct regs_info *
622 aarch64_regs_info (void)
623 {
624 if (!is_64bit_tdesc ())
625 return &regs_info_aarch32;
626
627 if (is_sve_tdesc ())
628 return &regs_info_aarch64_sve;
629
630 return &regs_info_aarch64;
631 }
632
633 /* Implementation of linux_target_ops method "supports_tracepoints". */
634
635 static int
636 aarch64_supports_tracepoints (void)
637 {
638 if (current_thread == NULL)
639 return 1;
640 else
641 {
642 /* We don't support tracepoints on aarch32 now. */
643 return is_64bit_tdesc ();
644 }
645 }
646
647 /* Implementation of linux_target_ops method "get_thread_area". */
648
649 static int
650 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
651 {
652 struct iovec iovec;
653 uint64_t reg;
654
655 iovec.iov_base = &reg;
656 iovec.iov_len = sizeof (reg);
657
658 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
659 return -1;
660
661 *addrp = reg;
662
663 return 0;
664 }
665
666 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
667
668 static void
669 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
670 {
671 int use_64bit = register_size (regcache->tdesc, 0) == 8;
672
673 if (use_64bit)
674 {
675 long l_sysno;
676
677 collect_register_by_name (regcache, "x8", &l_sysno);
678 *sysno = (int) l_sysno;
679 }
680 else
681 collect_register_by_name (regcache, "r7", sysno);
682 }
683
684 /* List of condition codes that we need. */
685
686 enum aarch64_condition_codes
687 {
688 EQ = 0x0,
689 NE = 0x1,
690 LO = 0x3,
691 GE = 0xa,
692 LT = 0xb,
693 GT = 0xc,
694 LE = 0xd,
695 };
696
697 enum aarch64_operand_type
698 {
699 OPERAND_IMMEDIATE,
700 OPERAND_REGISTER,
701 };
702
703 /* Representation of an operand. At this time, it only supports register
704 and immediate types. */
705
706 struct aarch64_operand
707 {
708 /* Type of the operand. */
709 enum aarch64_operand_type type;
710
711 /* Value of the operand according to the type. */
712 union
713 {
714 uint32_t imm;
715 struct aarch64_register reg;
716 };
717 };
718
719 /* List of registers that we are currently using, we can add more here as
720 we need to use them. */
721
722 /* General purpose scratch registers (64 bit). */
723 static const struct aarch64_register x0 = { 0, 1 };
724 static const struct aarch64_register x1 = { 1, 1 };
725 static const struct aarch64_register x2 = { 2, 1 };
726 static const struct aarch64_register x3 = { 3, 1 };
727 static const struct aarch64_register x4 = { 4, 1 };
728
729 /* General purpose scratch registers (32 bit). */
730 static const struct aarch64_register w0 = { 0, 0 };
731 static const struct aarch64_register w2 = { 2, 0 };
732
733 /* Intra-procedure scratch registers. */
734 static const struct aarch64_register ip0 = { 16, 1 };
735
736 /* Special purpose registers. */
737 static const struct aarch64_register fp = { 29, 1 };
738 static const struct aarch64_register lr = { 30, 1 };
739 static const struct aarch64_register sp = { 31, 1 };
740 static const struct aarch64_register xzr = { 31, 1 };
741
742 /* Dynamically allocate a new register. If we know the register
743 statically, we should make it a global as above instead of using this
744 helper function. */
745
746 static struct aarch64_register
747 aarch64_register (unsigned num, int is64)
748 {
749 return (struct aarch64_register) { num, is64 };
750 }
751
752 /* Helper function to create a register operand, for instructions with
753 different types of operands.
754
755 For example:
756 p += emit_mov (p, x0, register_operand (x1)); */
757
758 static struct aarch64_operand
759 register_operand (struct aarch64_register reg)
760 {
761 struct aarch64_operand operand;
762
763 operand.type = OPERAND_REGISTER;
764 operand.reg = reg;
765
766 return operand;
767 }
768
769 /* Helper function to create an immediate operand, for instructions with
770 different types of operands.
771
772 For example:
773 p += emit_mov (p, x0, immediate_operand (12)); */
774
775 static struct aarch64_operand
776 immediate_operand (uint32_t imm)
777 {
778 struct aarch64_operand operand;
779
780 operand.type = OPERAND_IMMEDIATE;
781 operand.imm = imm;
782
783 return operand;
784 }
785
786 /* Helper function to create an offset memory operand.
787
788 For example:
789 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
790
791 static struct aarch64_memory_operand
792 offset_memory_operand (int32_t offset)
793 {
794 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
795 }
796
797 /* Helper function to create a pre-index memory operand.
798
799 For example:
800 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
801
802 static struct aarch64_memory_operand
803 preindex_memory_operand (int32_t index)
804 {
805 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
806 }
807
808 /* Helper function to create a post-index memory operand.
809
810 For example:
811 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
812
813 static struct aarch64_memory_operand
814 postindex_memory_operand (int32_t index)
815 {
816 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
817 }
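/* Illustrative sketch (addition): the three helpers above select the
   three immediate addressing forms of the load/store emitters, e.g.

     p += emit_ldr (p, x0, sp, offset_memory_operand (16));     LDR x0, [sp, #16]
     p += emit_ldr (p, x0, sp, preindex_memory_operand (16));   LDR x0, [sp, #16]!
     p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  LDR x0, [sp], #16  */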
818
819 /* System control registers. These special registers can be written and
820 read with the MRS and MSR instructions.
821
822 - NZCV: Condition flags. GDB refers to this register under the CPSR
823 name.
824 - FPSR: Floating-point status register.
825 - FPCR: Floating-point control registers.
826 - TPIDR_EL0: Software thread ID register. */
827
828 enum aarch64_system_control_registers
829 {
830 /* op0 op1 crn crm op2 */
831 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
832 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
833 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
834 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
835 };
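/* Illustrative decode (addition, hedged): taking NZCV as an example, the
   value packs the fields named in the comment above as
     op0 = 1 (bit 14), op1 = 3 (bits 11-13), crn = 4 (bits 7-10),
     crm = 2 (bits 3-6), op2 = 0 (bits 0-2),
   a 15-bit system-register field that emit_mrs/emit_msr below splice
   into bits 5..19 of the instruction via ENCODE (system_reg, 15, 5).  */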
836
837 /* Write a BLR instruction into *BUF.
838
839 BLR rn
840
841 RN is the register to branch to. */
842
843 static int
844 emit_blr (uint32_t *buf, struct aarch64_register rn)
845 {
846 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
847 }
848
849 /* Write a RET instruction into *BUF.
850
851 RET xn
852
853 RN is the register to branch to. */
854
855 static int
856 emit_ret (uint32_t *buf, struct aarch64_register rn)
857 {
858 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
859 }
860
861 static int
862 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
863 struct aarch64_register rt,
864 struct aarch64_register rt2,
865 struct aarch64_register rn,
866 struct aarch64_memory_operand operand)
867 {
868 uint32_t opc;
869 uint32_t pre_index;
870 uint32_t write_back;
871
872 if (rt.is64)
873 opc = ENCODE (2, 2, 30);
874 else
875 opc = ENCODE (0, 2, 30);
876
877 switch (operand.type)
878 {
879 case MEMORY_OPERAND_OFFSET:
880 {
881 pre_index = ENCODE (1, 1, 24);
882 write_back = ENCODE (0, 1, 23);
883 break;
884 }
885 case MEMORY_OPERAND_POSTINDEX:
886 {
887 pre_index = ENCODE (0, 1, 24);
888 write_back = ENCODE (1, 1, 23);
889 break;
890 }
891 case MEMORY_OPERAND_PREINDEX:
892 {
893 pre_index = ENCODE (1, 1, 24);
894 write_back = ENCODE (1, 1, 23);
895 break;
896 }
897 default:
898 return 0;
899 }
900
901 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
902 | ENCODE (operand.index >> 3, 7, 15)
903 | ENCODE (rt2.num, 5, 10)
904 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
905 }
906
907 /* Write a STP instruction into *BUF.
908
909 STP rt, rt2, [rn, #offset]
910 STP rt, rt2, [rn, #index]!
911 STP rt, rt2, [rn], #index
912
913 RT and RT2 are the registers to store.
914 RN is the base address register.
915 OFFSET is the immediate to add to the base address. It is limited to a
916 -512 .. 504 range (7 bits << 3). */
917
918 static int
919 emit_stp (uint32_t *buf, struct aarch64_register rt,
920 struct aarch64_register rt2, struct aarch64_register rn,
921 struct aarch64_memory_operand operand)
922 {
923 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
924 }
925
926 /* Write a LDP instruction into *BUF.
927
928 LDP rt, rt2, [rn, #offset]
929 LDP rt, rt2, [rn, #index]!
930 LDP rt, rt2, [rn], #index
931
932 RT and RT2 are the registers to load.
933 RN is the base address register.
934 OFFSET is the immediate to add to the base address. It is limited to a
935 -512 .. 504 range (7 bits << 3). */
936
937 static int
938 emit_ldp (uint32_t *buf, struct aarch64_register rt,
939 struct aarch64_register rt2, struct aarch64_register rn,
940 struct aarch64_memory_operand operand)
941 {
942 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
943 }
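/* Usage sketch (addition, assuming a scratch instruction buffer P as
   used elsewhere in this file):

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
         which assembles to  STP x0, x1, [sp, #-16]!
     p += emit_ldp (p, x0, x1, sp, postindex_memory_operand (16));
         which assembles to  LDP x0, x1, [sp], #16  */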
944
945 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
946
947 LDP qt, qt2, [rn, #offset]
948
949 RT and RT2 are the Q registers to load.
950 RN is the base address register.
951 OFFSET is the immediate to add to the base address. It is limited to
952 -1024 .. 1008 range (7 bits << 4). */
953
954 static int
955 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
956 struct aarch64_register rn, int32_t offset)
957 {
958 uint32_t opc = ENCODE (2, 2, 30);
959 uint32_t pre_index = ENCODE (1, 1, 24);
960
961 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
962 | ENCODE (offset >> 4, 7, 15)
963 | ENCODE (rt2, 5, 10)
964 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
965 }
966
967 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
968
969 STP qt, qt2, [rn, #offset]
970
971 RT and RT2 are the Q registers to store.
972 RN is the base address register.
973 OFFSET is the immediate to add to the base address. It is limited to
974 -1024 .. 1008 range (7 bits << 4). */
975
976 static int
977 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
978 struct aarch64_register rn, int32_t offset)
979 {
980 uint32_t opc = ENCODE (2, 2, 30);
981 uint32_t pre_index = ENCODE (1, 1, 24);
982
983 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
984 | ENCODE (offset >> 4, 7, 15)
985 | ENCODE (rt2, 5, 10)
986 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
987 }
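/* Illustrative example (addition): emit_stp_q_offset (p, 0, 1, sp, 32)
   emits STP q0, q1, [sp, #32]; the offset must be a multiple of 16 in
   the -1024 .. 1008 range documented above, since only OFFSET >> 4 is
   encoded.  */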
988
989 /* Write a LDRH instruction into *BUF.
990
991 LDRH wt, [xn, #offset]
992 LDRH wt, [xn, #index]!
993 LDRH wt, [xn], #index
994
995 RT is the register to load.
996 RN is the base address register.
997 OFFSET is the immediate to add to the base address. It is limited to
998 0 .. 32760 range (12 bits << 3). */
999
1000 static int
1001 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1002 struct aarch64_register rn,
1003 struct aarch64_memory_operand operand)
1004 {
1005 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1006 }
1007
1008 /* Write a LDRB instruction into *BUF.
1009
1010 LDRB wt, [xn, #offset]
1011 LDRB wt, [xn, #index]!
1012 LDRB wt, [xn], #index
1013
1014 RT is the register to load.
1015 RN is the base address register.
1016 OFFSET is the immediate to add to the base address. It is limited to
1017 0 .. 32760 range (12 bits << 3). */
1018
1019 static int
1020 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1021 struct aarch64_register rn,
1022 struct aarch64_memory_operand operand)
1023 {
1024 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1025 }
1026
1027
1028
1029 /* Write a STR instruction into *BUF.
1030
1031 STR rt, [rn, #offset]
1032 STR rt, [rn, #index]!
1033 STR rt, [rn], #index
1034
1035 RT is the register to store.
1036 RN is the base address register.
1037 OFFSET is the immediate to add to the base address. It is limited to
1038 0 .. 32760 range (12 bits << 3). */
1039
1040 static int
1041 emit_str (uint32_t *buf, struct aarch64_register rt,
1042 struct aarch64_register rn,
1043 struct aarch64_memory_operand operand)
1044 {
1045 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1046 }
1047
1048 /* Helper function emitting an exclusive load or store instruction. */
1049
1050 static int
1051 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1052 enum aarch64_opcodes opcode,
1053 struct aarch64_register rs,
1054 struct aarch64_register rt,
1055 struct aarch64_register rt2,
1056 struct aarch64_register rn)
1057 {
1058 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1059 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1060 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1061 }
1062
1063 /* Write an LDAXR instruction into *BUF.
1064
1065 LDAXR rt, [xn]
1066
1067 RT is the destination register.
1068 RN is the base address register. */
1069
1070 static int
1071 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1072 struct aarch64_register rn)
1073 {
1074 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1075 xzr, rn);
1076 }
1077
1078 /* Write a STXR instruction into *BUF.
1079
1080 STXR ws, rt, [xn]
1081
1082 RS is the result register, it indicates if the store succeeded or not.
1083 RT is the destination register.
1084 RN is the base address register. */
1085
1086 static int
1087 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1088 struct aarch64_register rt, struct aarch64_register rn)
1089 {
1090 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1091 xzr, rn);
1092 }
1093
1094 /* Write a STLR instruction into *BUF.
1095
1096 STLR rt, [xn]
1097
1098 RT is the register to store.
1099 RN is the base address register. */
1100
1101 static int
1102 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1103 struct aarch64_register rn)
1104 {
1105 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1106 xzr, rn);
1107 }
1108
1109 /* Helper function for data processing instructions with register sources. */
1110
1111 static int
1112 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1113 struct aarch64_register rd,
1114 struct aarch64_register rn,
1115 struct aarch64_register rm)
1116 {
1117 uint32_t size = ENCODE (rd.is64, 1, 31);
1118
1119 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1120 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1121 }
1122
1123 /* Helper function for data processing instructions taking either a register
1124 or an immediate. */
1125
1126 static int
1127 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1128 struct aarch64_register rd,
1129 struct aarch64_register rn,
1130 struct aarch64_operand operand)
1131 {
1132 uint32_t size = ENCODE (rd.is64, 1, 31);
1133 /* The opcode is different for register and immediate source operands. */
1134 uint32_t operand_opcode;
1135
1136 if (operand.type == OPERAND_IMMEDIATE)
1137 {
1138 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1139 operand_opcode = ENCODE (8, 4, 25);
1140
1141 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1142 | ENCODE (operand.imm, 12, 10)
1143 | ENCODE (rn.num, 5, 5)
1144 | ENCODE (rd.num, 5, 0));
1145 }
1146 else
1147 {
1148 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1149 operand_opcode = ENCODE (5, 4, 25);
1150
1151 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1152 rn, operand.reg);
1153 }
1154 }
1155
1156 /* Write an ADD instruction into *BUF.
1157
1158 ADD rd, rn, #imm
1159 ADD rd, rn, rm
1160
1161 This function handles both an immediate and register add.
1162
1163 RD is the destination register.
1164 RN is the input register.
1165 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1166 OPERAND_REGISTER. */
1167
1168 static int
1169 emit_add (uint32_t *buf, struct aarch64_register rd,
1170 struct aarch64_register rn, struct aarch64_operand operand)
1171 {
1172 return emit_data_processing (buf, ADD, rd, rn, operand);
1173 }
1174
1175 /* Write a SUB instruction into *BUF.
1176
1177 SUB rd, rn, #imm
1178 SUB rd, rn, rm
1179
1180 This function handles both an immediate and register sub.
1181
1182 RD is the destination register.
1183 RN is the input register.
1184 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or OPERAND_REGISTER. */
1185
1186 static int
1187 emit_sub (uint32_t *buf, struct aarch64_register rd,
1188 struct aarch64_register rn, struct aarch64_operand operand)
1189 {
1190 return emit_data_processing (buf, SUB, rd, rn, operand);
1191 }
1192
1193 /* Write a MOV instruction into *BUF.
1194
1195 MOV rd, #imm
1196 MOV rd, rm
1197
1198 This function handles both a wide immediate move and a register move,
1199 with the condition that the source register is not xzr. xzr and the
1200 stack pointer share the same encoding and this function only supports
1201 the stack pointer.
1202
1203 RD is the destination register.
1204 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1205 OPERAND_REGISTER. */
1206
1207 static int
1208 emit_mov (uint32_t *buf, struct aarch64_register rd,
1209 struct aarch64_operand operand)
1210 {
1211 if (operand.type == OPERAND_IMMEDIATE)
1212 {
1213 uint32_t size = ENCODE (rd.is64, 1, 31);
1214 /* Do not shift the immediate. */
1215 uint32_t shift = ENCODE (0, 2, 21);
1216
1217 return aarch64_emit_insn (buf, MOV | size | shift
1218 | ENCODE (operand.imm, 16, 5)
1219 | ENCODE (rd.num, 5, 0));
1220 }
1221 else
1222 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1223 }
1224
1225 /* Write a MOVK instruction into *BUF.
1226
1227 MOVK rd, #imm, lsl #shift
1228
1229 RD is the destination register.
1230 IMM is the immediate.
1231 SHIFT is the logical shift left to apply to IMM. */
1232
1233 static int
1234 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1235 unsigned shift)
1236 {
1237 uint32_t size = ENCODE (rd.is64, 1, 31);
1238
1239 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1240 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1241 }
1242
1243 /* Write instructions into *BUF in order to move ADDR into a register.
1244 ADDR can be a 64-bit value.
1245
1246 This function will emit a series of MOV and MOVK instructions, such as:
1247
1248 MOV xd, #(addr)
1249 MOVK xd, #(addr >> 16), lsl #16
1250 MOVK xd, #(addr >> 32), lsl #32
1251 MOVK xd, #(addr >> 48), lsl #48 */
1252
1253 static int
1254 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1255 {
1256 uint32_t *p = buf;
1257
1258 /* The MOV (wide immediate) instruction clears the top bits of the
1259 register. */
1260 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1261
1262 if ((addr >> 16) != 0)
1263 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1264 else
1265 return p - buf;
1266
1267 if ((addr >> 32) != 0)
1268 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1269 else
1270 return p - buf;
1271
1272 if ((addr >> 48) != 0)
1273 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1274
1275 return p - buf;
1276 }
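/* Worked example (addition, hypothetical address): for
   ADDR = 0x0000007f12345678 this emits

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16
     MOVK xd, #0x7f,   lsl #32

   and returns 3, since bits 48..63 of ADDR are zero and the final MOVK
   is skipped.  */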
1277
1278 /* Write a SUBS instruction into *BUF.
1279
1280 SUBS rd, rn, rm
1281
1282 This instruction updates the condition flags.
1283
1284 RD is the destination register.
1285 RN and RM are the source registers. */
1286
1287 static int
1288 emit_subs (uint32_t *buf, struct aarch64_register rd,
1289 struct aarch64_register rn, struct aarch64_operand operand)
1290 {
1291 return emit_data_processing (buf, SUBS, rd, rn, operand);
1292 }
1293
1294 /* Write a CMP instruction into *BUF.
1295
1296 CMP rn, rm
1297
1298 This instruction is an alias of SUBS xzr, rn, rm.
1299
1300 RN and RM are the registers to compare. */
1301
1302 static int
1303 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1304 struct aarch64_operand operand)
1305 {
1306 return emit_subs (buf, xzr, rn, operand);
1307 }
1308
1309 /* Write an AND instruction into *BUF.
1310
1311 AND rd, rn, rm
1312
1313 RD is the destination register.
1314 RN and RM are the source registers. */
1315
1316 static int
1317 emit_and (uint32_t *buf, struct aarch64_register rd,
1318 struct aarch64_register rn, struct aarch64_register rm)
1319 {
1320 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1321 }
1322
1323 /* Write an ORR instruction into *BUF.
1324
1325 ORR rd, rn, rm
1326
1327 RD is the destination register.
1328 RN and RM are the source registers. */
1329
1330 static int
1331 emit_orr (uint32_t *buf, struct aarch64_register rd,
1332 struct aarch64_register rn, struct aarch64_register rm)
1333 {
1334 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1335 }
1336
1337 /* Write an ORN instruction into *BUF.
1338
1339 ORN rd, rn, rm
1340
1341 RD is the destination register.
1342 RN and RM are the source registers. */
1343
1344 static int
1345 emit_orn (uint32_t *buf, struct aarch64_register rd,
1346 struct aarch64_register rn, struct aarch64_register rm)
1347 {
1348 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1349 }
1350
1351 /* Write an EOR instruction into *BUF.
1352
1353 EOR rd, rn, rm
1354
1355 RD is the destination register.
1356 RN and RM are the source registers. */
1357
1358 static int
1359 emit_eor (uint32_t *buf, struct aarch64_register rd,
1360 struct aarch64_register rn, struct aarch64_register rm)
1361 {
1362 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1363 }
1364
1365 /* Write a MVN instruction into *BUF.
1366
1367 MVN rd, rm
1368
1369 This is an alias for ORN rd, xzr, rm.
1370
1371 RD is the destination register.
1372 RM is the source register. */
1373
1374 static int
1375 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1376 struct aarch64_register rm)
1377 {
1378 return emit_orn (buf, rd, xzr, rm);
1379 }
1380
1381 /* Write a LSLV instruction into *BUF.
1382
1383 LSLV rd, rn, rm
1384
1385 RD is the destination register.
1386 RN and RM are the source registers. */
1387
1388 static int
1389 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1390 struct aarch64_register rn, struct aarch64_register rm)
1391 {
1392 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1393 }
1394
1395 /* Write a LSRV instruction into *BUF.
1396
1397 LSRV rd, rn, rm
1398
1399 RD is the destination register.
1400 RN and RM are the source registers. */
1401
1402 static int
1403 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1404 struct aarch64_register rn, struct aarch64_register rm)
1405 {
1406 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1407 }
1408
1409 /* Write an ASRV instruction into *BUF.
1410
1411 ASRV rd, rn, rm
1412
1413 RD is the destination register.
1414 RN and RM are the source registers. */
1415
1416 static int
1417 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1418 struct aarch64_register rn, struct aarch64_register rm)
1419 {
1420 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1421 }
1422
1423 /* Write a MUL instruction into *BUF.
1424
1425 MUL rd, rn, rm
1426
1427 RD is the destination register.
1428 RN and RM are the source registers. */
1429
1430 static int
1431 emit_mul (uint32_t *buf, struct aarch64_register rd,
1432 struct aarch64_register rn, struct aarch64_register rm)
1433 {
1434 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1435 }
1436
1437 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1438
1439 MRS xt, system_reg
1440
1441 RT is the destination register.
1442 SYSTEM_REG is the special purpose register to read. */
1443
1444 static int
1445 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1446 enum aarch64_system_control_registers system_reg)
1447 {
1448 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1449 | ENCODE (rt.num, 5, 0));
1450 }
1451
1452 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1453
1454 MSR system_reg, xt
1455
1456 SYSTEM_REG is the special purpose register to write.
1457 RT is the input register. */
1458
1459 static int
1460 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1461 struct aarch64_register rt)
1462 {
1463 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1464 | ENCODE (rt.num, 5, 0));
1465 }
1466
1467 /* Write a SEVL instruction into *BUF.
1468
1469 This is a hint instruction telling the hardware to trigger an event. */
1470
1471 static int
1472 emit_sevl (uint32_t *buf)
1473 {
1474 return aarch64_emit_insn (buf, SEVL);
1475 }
1476
1477 /* Write a WFE instruction into *BUF.
1478
1479 This is a hint instruction telling the hardware to wait for an event. */
1480
1481 static int
1482 emit_wfe (uint32_t *buf)
1483 {
1484 return aarch64_emit_insn (buf, WFE);
1485 }
1486
1487 /* Write a SBFM instruction into *BUF.
1488
1489 SBFM rd, rn, #immr, #imms
1490
1491 This instruction moves the bits from #immr to #imms into the
1492 destination, sign extending the result.
1493
1494 RD is the destination register.
1495 RN is the source register.
1496 IMMR is the bit number to start at (least significant bit).
1497 IMMS is the bit number to stop at (most significant bit). */
1498
1499 static int
1500 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1501 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1502 {
1503 uint32_t size = ENCODE (rd.is64, 1, 31);
1504 uint32_t n = ENCODE (rd.is64, 1, 22);
1505
1506 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1507 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1508 | ENCODE (rd.num, 5, 0));
1509 }
1510
1511 /* Write a SBFX instruction into *BUF.
1512
1513 SBFX rd, rn, #lsb, #width
1514
1515 This instruction moves #width bits from #lsb into the destination, sign
1516 extending the result. This is an alias for:
1517
1518 SBFM rd, rn, #lsb, #(lsb + width - 1)
1519
1520 RD is the destination register.
1521 RN is the source register.
1522 LSB is the bit number to start at (least significant bit).
1523 WIDTH is the number of bits to move. */
1524
1525 static int
1526 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1527 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1528 {
1529 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1530 }
1531
1532 /* Write a UBFM instruction into *BUF.
1533
1534 UBFM rd, rn, #immr, #imms
1535
1536 This instruction moves the bits from #immr to #imms into the
1537 destination, extending the result with zeros.
1538
1539 RD is the destination register.
1540 RN is the source register.
1541 IMMR is the bit number to start at (least significant bit).
1542 IMMS is the bit number to stop at (most significant bit). */
1543
1544 static int
1545 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1546 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1547 {
1548 uint32_t size = ENCODE (rd.is64, 1, 31);
1549 uint32_t n = ENCODE (rd.is64, 1, 22);
1550
1551 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1552 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1553 | ENCODE (rd.num, 5, 0));
1554 }
1555
1556 /* Write a UBFX instruction into *BUF.
1557
1558 UBFX rd, rn, #lsb, #width
1559
1560 This instruction moves #width bits from #lsb into the destination,
1561 extending the result with zeros. This is an alias for:
1562
1563 UBFM rd, rn, #lsb, #(lsb + width - 1)
1564
1565 RD is the destination register.
1566 RN is the source register.
1567 LSB is the bit number to start at (least significant bit).
1568 WIDTH is the number of bits to move. */
1569
1570 static int
1571 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1572 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1573 {
1574 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1575 }
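/* Illustrative example (addition): emit_ubfx (p, w2, w0, 8, 8) extracts
   bits 8..15 of w0 into w2 with zero extension, i.e. the alias of
   UBFM w2, w0, #8, #15.  */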
1576
1577 /* Write a CSINC instruction into *BUF.
1578
1579 CSINC rd, rn, rm, cond
1580
1581 This instruction conditionally increments rn or rm and places the result
1582 in rd. rn is chosen if the condition is true.
1583
1584 RD is the destination register.
1585 RN and RM are the source registers.
1586 COND is the encoded condition. */
1587
1588 static int
1589 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1590 struct aarch64_register rn, struct aarch64_register rm,
1591 unsigned cond)
1592 {
1593 uint32_t size = ENCODE (rd.is64, 1, 31);
1594
1595 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1596 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1597 | ENCODE (rd.num, 5, 0));
1598 }
1599
1600 /* Write a CSET instruction into *BUF.
1601
1602 CSET rd, cond
1603
1604 This instruction conditionally writes 1 or 0 to the destination register.
1605 1 is written if the condition is true. This is an alias for:
1606
1607 CSINC rd, xzr, xzr, !cond
1608
1609 Note that the condition needs to be inverted.
1610
1611 RD is the destination register.
1613 COND is the encoded condition. */
1614
1615 static int
1616 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1617 {
1618 /* The least significant bit of the condition needs toggling in order to
1619 invert it. */
1620 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1621 }
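/* Illustrative example (addition): emit_cset (p, x0, EQ) emits
   CSINC x0, xzr, xzr, NE (EQ ^ 0x1 is NE), so x0 becomes 1 when the Z
   flag is set and 0 otherwise, i.e. the CSET x0, EQ alias.  */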
1622
1623 /* Write LEN instructions from BUF into the inferior memory at *TO.
1624
1625 Note instructions are always little endian on AArch64, unlike data. */
1626
1627 static void
1628 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1629 {
1630 size_t byte_len = len * sizeof (uint32_t);
1631 #if (__BYTE_ORDER == __BIG_ENDIAN)
1632 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1633 size_t i;
1634
1635 for (i = 0; i < len; i++)
1636 le_buf[i] = htole32 (buf[i]);
1637
1638 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1639
1640 xfree (le_buf);
1641 #else
1642 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1643 #endif
1644
1645 *to += byte_len;
1646 }
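/* Usage sketch (addition, hedged): the jump-pad builder below fills a
   local uint32_t buffer with the emit_* helpers and then flushes it with
   something like

     CORE_ADDR buildaddr = *jump_entry;
     append_insns (&buildaddr, p - buf, buf);

   which also advances BUILDADDR past the bytes just written.  */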
1647
1648 /* Sub-class of struct aarch64_insn_data, store information of
1649 instruction relocation for fast tracepoint. Visitor can
1650 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1651 the relocated instructions in buffer pointed by INSN_PTR. */
1652
1653 struct aarch64_insn_relocation_data
1654 {
1655 struct aarch64_insn_data base;
1656
1657 /* The new address the instruction is relocated to. */
1658 CORE_ADDR new_addr;
1659 /* Pointer to the buffer of relocated instruction(s). */
1660 uint32_t *insn_ptr;
1661 };
1662
1663 /* Implementation of aarch64_insn_visitor method "b". */
1664
1665 static void
1666 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1667 struct aarch64_insn_data *data)
1668 {
1669 struct aarch64_insn_relocation_data *insn_reloc
1670 = (struct aarch64_insn_relocation_data *) data;
1671 int64_t new_offset
1672 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1673
1674 if (can_encode_int32 (new_offset, 28))
1675 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1676 }
1677
1678 /* Implementation of aarch64_insn_visitor method "b_cond". */
1679
1680 static void
1681 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1682 struct aarch64_insn_data *data)
1683 {
1684 struct aarch64_insn_relocation_data *insn_reloc
1685 = (struct aarch64_insn_relocation_data *) data;
1686 int64_t new_offset
1687 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1688
1689 if (can_encode_int32 (new_offset, 21))
1690 {
1691 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1692 new_offset);
1693 }
1694 else if (can_encode_int32 (new_offset, 28))
1695 {
1696 /* The offset is out of range for a conditional branch
1697 instruction but not for an unconditional branch. We can use
1698 the following instructions instead:
1699
1700 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1701 B NOT_TAKEN ; Else jump over TAKEN and continue.
1702 TAKEN:
1703 B #(offset - 8)
1704 NOT_TAKEN:
1705
1706 */
1707
1708 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1709 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1710 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1711 }
1712 }
1713
1714 /* Implementation of aarch64_insn_visitor method "cb". */
1715
1716 static void
1717 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1718 const unsigned rn, int is64,
1719 struct aarch64_insn_data *data)
1720 {
1721 struct aarch64_insn_relocation_data *insn_reloc
1722 = (struct aarch64_insn_relocation_data *) data;
1723 int64_t new_offset
1724 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1725
1726 if (can_encode_int32 (new_offset, 21))
1727 {
1728 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1729 aarch64_register (rn, is64), new_offset);
1730 }
1731 else if (can_encode_int32 (new_offset, 28))
1732 {
1733 /* The offset is out of range for a compare and branch
1734 instruction but not for an unconditional branch. We can use
1735 the following instructions instead:
1736
1737 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1738 B NOT_TAKEN ; Else jump over TAKEN and continue.
1739 TAKEN:
1740 B #(offset - 8)
1741 NOT_TAKEN:
1742
1743 */
1744 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1745 aarch64_register (rn, is64), 8);
1746 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1747 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1748 }
1749 }
1750
1751 /* Implementation of aarch64_insn_visitor method "tb". */
1752
1753 static void
1754 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1755 const unsigned rt, unsigned bit,
1756 struct aarch64_insn_data *data)
1757 {
1758 struct aarch64_insn_relocation_data *insn_reloc
1759 = (struct aarch64_insn_relocation_data *) data;
1760 int64_t new_offset
1761 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1762
1763 if (can_encode_int32 (new_offset, 16))
1764 {
1765 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1766 aarch64_register (rt, 1), new_offset);
1767 }
1768 else if (can_encode_int32 (new_offset, 28))
1769 {
1770 /* The offset is out of range for a test bit and branch
1771 instruction but not for an unconditional branch. We can use
1772 the following instructions instead:
1773
1774 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1775 B NOT_TAKEN ; Else jump over TAKEN and continue.
1776 TAKEN:
1777 B #(offset - 8)
1778 NOT_TAKEN:
1779
1780 */
1781 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1782 aarch64_register (rt, 1), 8);
1783 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1784 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1785 new_offset - 8);
1786 }
1787 }
1788
1789 /* Implementation of aarch64_insn_visitor method "adr". */
1790
1791 static void
1792 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1793 const int is_adrp,
1794 struct aarch64_insn_data *data)
1795 {
1796 struct aarch64_insn_relocation_data *insn_reloc
1797 = (struct aarch64_insn_relocation_data *) data;
1798 /* We know exactly the address the ADR{P,} instruction will compute.
1799 We can just write it to the destination register. */
1800 CORE_ADDR address = data->insn_addr + offset;
1801
1802 if (is_adrp)
1803 {
1804 /* Clear the lower 12 bits of the offset to get the 4K page. */
1805 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1806 aarch64_register (rd, 1),
1807 address & ~0xfff);
1808 }
1809 else
1810 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1811 aarch64_register (rd, 1), address);
1812 }
1813
1814 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1815
1816 static void
1817 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1818 const unsigned rt, const int is64,
1819 struct aarch64_insn_data *data)
1820 {
1821 struct aarch64_insn_relocation_data *insn_reloc
1822 = (struct aarch64_insn_relocation_data *) data;
1823 CORE_ADDR address = data->insn_addr + offset;
1824
1825 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1826 aarch64_register (rt, 1), address);
1827
1828 /* We know exactly what address to load from, and what register we
1829 can use:
1830
1831 MOV xd, #(oldloc + offset)
1832 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1833 ...
1834
1835 LDR xd, [xd] ; or LDRSW xd, [xd]
1836
1837 */
1838
1839 if (is_sw)
1840 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1841 aarch64_register (rt, 1),
1842 aarch64_register (rt, 1),
1843 offset_memory_operand (0));
1844 else
1845 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1846 aarch64_register (rt, is64),
1847 aarch64_register (rt, 1),
1848 offset_memory_operand (0));
1849 }
1850
1851 /* Implementation of aarch64_insn_visitor method "others". */
1852
1853 static void
1854 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1855 struct aarch64_insn_data *data)
1856 {
1857 struct aarch64_insn_relocation_data *insn_reloc
1858 = (struct aarch64_insn_relocation_data *) data;
1859
1860 /* The instruction is not PC relative. Just re-emit it at the new
1861 location. */
1862 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1863 }
1864
1865 static const struct aarch64_insn_visitor visitor =
1866 {
1867 aarch64_ftrace_insn_reloc_b,
1868 aarch64_ftrace_insn_reloc_b_cond,
1869 aarch64_ftrace_insn_reloc_cb,
1870 aarch64_ftrace_insn_reloc_tb,
1871 aarch64_ftrace_insn_reloc_adr,
1872 aarch64_ftrace_insn_reloc_ldr_literal,
1873 aarch64_ftrace_insn_reloc_others,
1874 };
1875
1876 /* Implementation of linux_target_ops method
1877 "install_fast_tracepoint_jump_pad". */
1878
1879 static int
1880 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1881 CORE_ADDR tpaddr,
1882 CORE_ADDR collector,
1883 CORE_ADDR lockaddr,
1884 ULONGEST orig_size,
1885 CORE_ADDR *jump_entry,
1886 CORE_ADDR *trampoline,
1887 ULONGEST *trampoline_size,
1888 unsigned char *jjump_pad_insn,
1889 ULONGEST *jjump_pad_insn_size,
1890 CORE_ADDR *adjusted_insn_addr,
1891 CORE_ADDR *adjusted_insn_addr_end,
1892 char *err)
1893 {
1894 uint32_t buf[256];
1895 uint32_t *p = buf;
1896 int64_t offset;
1897 int i;
1898 uint32_t insn;
1899 CORE_ADDR buildaddr = *jump_entry;
1900 struct aarch64_insn_relocation_data insn_data;
1901
1902 /* We need to save the current state on the stack both to restore it
1903 later and to collect register values when the tracepoint is hit.
1904
1905 The saved registers are pushed in a layout that needs to be in sync
1906 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1907 the supply_fast_tracepoint_registers function will fill in the
1908 register cache from a pointer to saved registers on the stack we build
1909 here.
1910
1911 For simplicity, we set the size of each cell on the stack to 16 bytes.
1912 This way one cell can hold any register type, from system registers
1913 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
1914 has to be 16-byte aligned anyway.
1915
1916 Note that the CPSR register does not exist on AArch64. Instead,
1917 the system bits describing the process state, notably the condition
1918 flags, are accessed with the MRS/MSR instructions. We save them as
1919 if they were part of a CPSR register because that is how GDB
1920 interprets these system bits. At the moment, only the condition
1921 flags (NZCV) are saved in the CPSR cell.
1922
1923 Stack layout, each cell is 16 bytes (descending):
1924
1925 High *-------- SIMD&FP registers from 31 down to 0. --------*
1926 | q31 |
1927 . .
1928 . . 32 cells
1929 . .
1930 | q0 |
1931 *---- General purpose registers from 30 down to 0. ----*
1932 | x30 |
1933 . .
1934 . . 31 cells
1935 . .
1936 | x0 |
1937 *------------- Special purpose registers. -------------*
1938 | SP |
1939 | PC |
1940 | CPSR (NZCV) | 5 cells
1941 | FPSR |
1942 | FPCR | <- SP + 16
1943 *------------- collecting_t object --------------------*
1944 | TPIDR_EL0 | struct tracepoint * |
1945 Low *------------------------------------------------------*
1946
1947 After this stack is set up, we issue a call to the collector, passing
1948 it the saved registers at (SP + 16). */
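/* In total the pad reserves (32 + 31 + 5 + 1) * 16 = 1104 bytes of
inferior stack: 32 SIMD&FP cells, 31 general purpose cells, 5 special
purpose cells and one cell for the collecting_t object.  */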
1949
1950 /* Push SIMD&FP registers on the stack:
1951
1952 SUB sp, sp, #(32 * 16)
1953
1954 STP q30, q31, [sp, #(30 * 16)]
1955 ...
1956 STP q0, q1, [sp]
1957
1958 */
1959 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1960 for (i = 30; i >= 0; i -= 2)
1961 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1962
1963 /* Push general purpose registers on the stack. Note that we do not need
1964 to push x31 as it represents the xzr register and not the stack
1965 pointer in a STR instruction.
1966
1967 SUB sp, sp, #(31 * 16)
1968
1969 STR x30, [sp, #(30 * 16)]
1970 ...
1971 STR x0, [sp]
1972
1973 */
1974 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1975 for (i = 30; i >= 0; i -= 1)
1976 p += emit_str (p, aarch64_register (i, 1), sp,
1977 offset_memory_operand (i * 16));
1978
1979 /* Make space for 5 more cells.
1980
1981 SUB sp, sp, #(5 * 16)
1982
1983 */
1984 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1985
1986
1987 /* Save SP:
1988
1989 ADD x4, sp, #((32 + 31 + 5) * 16)
1990 STR x4, [sp, #(4 * 16)]
1991
1992 */
1993 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1994 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
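/* The immediate (32 + 31 + 5) * 16 undoes the three SUB instructions
above, so x4 holds the value SP had when the tracepoint was hit.  */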
1995
1996 /* Save PC (tracepoint address):
1997
1998 MOV x3, #(tpaddr)
1999 ...
2000
2001 STR x3, [sp, #(3 * 16)]
2002
2003 */
2004
2005 p += emit_mov_addr (p, x3, tpaddr);
2006 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2007
2008 /* Save CPSR (NZCV), FPSR and FPCR:
2009
2010 MRS x2, nzcv
2011 MRS x1, fpsr
2012 MRS x0, fpcr
2013
2014 STR x2, [sp, #(2 * 16)]
2015 STR x1, [sp, #(1 * 16)]
2016 STR x0, [sp, #(0 * 16)]
2017
2018 */
2019 p += emit_mrs (p, x2, NZCV);
2020 p += emit_mrs (p, x1, FPSR);
2021 p += emit_mrs (p, x0, FPCR);
2022 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2023 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2024 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2025
2026 /* Push the collecting_t object. It consists of the address of the
2027 tracepoint and an ID for the current thread. We get the latter by
2028 reading the tpidr_el0 system register. It corresponds to the
2029 NT_ARM_TLS register accessible with ptrace.
2030
2031 MOV x0, #(tpoint)
2032 ...
2033
2034 MRS x1, tpidr_el0
2035
2036 STP x0, x1, [sp, #-16]!
2037
2038 */
2039
2040 p += emit_mov_addr (p, x0, tpoint);
2041 p += emit_mrs (p, x1, TPIDR_EL0);
2042 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2043
2044 /* Spin-lock:
2045
2046 The shared memory for the lock is at lockaddr. It will hold zero
2047 if no-one is holding the lock, otherwise it contains the address of
2048 the collecting_t object on the stack of the thread which acquired it.
2049
2050 At this stage, the stack pointer points to this thread's collecting_t
2051 object.
2052
2053 We use the following registers:
2054 - x0: Address of the lock.
2055 - x1: Pointer to collecting_t object.
2056 - x2: Scratch register.
2057
2058 MOV x0, #(lockaddr)
2059 ...
2060 MOV x1, sp
2061
2062 ; Trigger an event local to this core, so the first WFE below
2063 ; completes immediately instead of waiting.
2064 SEVL
2065 again:
2066 ; Wait for an event. The event is triggered by either the SEVL
2067 ; or STLR instructions (store release).
2068 WFE
2069
2070 ; Atomically read at lockaddr. This marks the memory location as
2071 ; exclusive. This instruction also has acquire semantics: memory
2072 ; accesses that appear after it cannot be reordered to take place
2073 ; before it.
2074 LDAXR x2, [x0]
2075
2076 ; Try again if another thread holds the lock.
2077 CBNZ x2, again
2078
2079 ; We can lock it! Write the address of the collecting_t object.
2080 ; This instruction will fail if the memory location is not marked
2081 ; as exclusive anymore. If it succeeds, it will remove the
2082 ; exclusive mark on the memory location. This way, if another
2083 ; thread executes this instruction before us, we will fail and try
2084 ; all over again.
2085 STXR w2, x1, [x0]
2086 CBNZ w2, again
2087
2088 */
2089
2090 p += emit_mov_addr (p, x0, lockaddr);
2091 p += emit_mov (p, x1, register_operand (sp));
2092
2093 p += emit_sevl (p);
2094 p += emit_wfe (p);
2095 p += emit_ldaxr (p, x2, x0);
2096 p += emit_cb (p, 1, w2, -2 * 4);
2097 p += emit_stxr (p, w2, x1, x0);
2098 p += emit_cb (p, 1, x2, -4 * 4);
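/* The branch offsets above are byte offsets relative to each CBNZ
itself: -2 * 4 skips back two instructions and -4 * 4 skips back
four, both landing on the WFE at the "again" label of the listing
above.  */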
2099
2100 /* Call collector (struct tracepoint *, unsigned char *):
2101
2102 MOV x0, #(tpoint)
2103 ...
2104
2105 ; Saved registers start after the collecting_t object.
2106 ADD x1, sp, #16
2107
2108 ; We use an intra-procedure-call scratch register.
2109 MOV ip0, #(collector)
2110 ...
2111
2112 ; And call back to C!
2113 BLR ip0
2114
2115 */
2116
2117 p += emit_mov_addr (p, x0, tpoint);
2118 p += emit_add (p, x1, sp, immediate_operand (16));
2119
2120 p += emit_mov_addr (p, ip0, collector);
2121 p += emit_blr (p, ip0);
2122
2123 /* Release the lock.
2124
2125 MOV x0, #(lockaddr)
2126 ...
2127
2128 ; This instruction is a normal store with memory ordering
2129 ; constraints. Thanks to this we do not have to put a data
2130 ; barrier instruction to make sure all data reads and writes are done
2131 ; before this instruction is executed. Furthermore, this instruction
2132 ; will trigger an event, letting other threads know they can grab
2133 ; the lock.
2134 STLR xzr, [x0]
2135
2136 */
2137 p += emit_mov_addr (p, x0, lockaddr);
2138 p += emit_stlr (p, xzr, x0);
2139
2140 /* Free collecting_t object:
2141
2142 ADD sp, sp, #16
2143
2144 */
2145 p += emit_add (p, sp, sp, immediate_operand (16));
2146
2147 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2148 registers from the stack.
2149
2150 LDR x2, [sp, #(2 * 16)]
2151 LDR x1, [sp, #(1 * 16)]
2152 LDR x0, [sp, #(0 * 16)]
2153
2154 MSR NZCV, x2
2155 MSR FPSR, x1
2156 MSR FPCR, x0
2157
2158 ADD sp, sp, #(5 * 16)
2159
2160 */
2161 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2162 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2163 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2164 p += emit_msr (p, NZCV, x2);
2165 p += emit_msr (p, FPSR, x1);
2166 p += emit_msr (p, FPCR, x0);
2167
2168 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2169
2170 /* Pop general purpose registers:
2171
2172 LDR x0, [sp]
2173 ...
2174 LDR x30, [sp, #(30 * 16)]
2175
2176 ADD sp, sp, #(31 * 16)
2177
2178 */
2179 for (i = 0; i <= 30; i += 1)
2180 p += emit_ldr (p, aarch64_register (i, 1), sp,
2181 offset_memory_operand (i * 16));
2182 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2183
2184 /* Pop SIMD&FP registers:
2185
2186 LDP q0, q1, [sp]
2187 ...
2188 LDP q30, q31, [sp, #(30 * 16)]
2189
2190 ADD sp, sp, #(32 * 16)
2191
2192 */
2193 for (i = 0; i <= 30; i += 2)
2194 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2195 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2196
2197 /* Write the code into the inferior memory. */
2198 append_insns (&buildaddr, p - buf, buf);
2199
2200 /* Now emit the relocated instruction. */
2201 *adjusted_insn_addr = buildaddr;
2202 target_read_uint32 (tpaddr, &insn);
2203
2204 insn_data.base.insn_addr = tpaddr;
2205 insn_data.new_addr = buildaddr;
2206 insn_data.insn_ptr = buf;
2207
2208 aarch64_relocate_instruction (insn, &visitor,
2209 (struct aarch64_insn_data *) &insn_data);
2210
2211 /* We may not have been able to relocate the instruction. */
2212 if (insn_data.insn_ptr == buf)
2213 {
2214 sprintf (err,
2215 "E.Could not relocate instruction from %s to %s.",
2216 core_addr_to_string_nz (tpaddr),
2217 core_addr_to_string_nz (buildaddr));
2218 return 1;
2219 }
2220 else
2221 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2222 *adjusted_insn_addr_end = buildaddr;
2223
2224 /* Go back to the start of the buffer. */
2225 p = buf;
2226
2227 /* Emit a branch back from the jump pad. */
2228 offset = (tpaddr + orig_size - buildaddr);
2229 if (!can_encode_int32 (offset, 28))
2230 {
2231 sprintf (err,
2232 "E.Jump back from jump pad too far from tracepoint "
2233 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2234 offset);
2235 return 1;
2236 }
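/* The unconditional B instruction encodes a signed 26-bit word offset,
i.e. a byte offset of +/-128 MiB, hence the 28-bit checks on the
branch offsets here and below.  */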
2237
2238 p += emit_b (p, 0, offset);
2239 append_insns (&buildaddr, p - buf, buf);
2240
2241 /* Give the caller a branch instruction into the jump pad. */
2242 offset = (*jump_entry - tpaddr);
2243 if (!can_encode_int32 (offset, 28))
2244 {
2245 sprintf (err,
2246 "E.Jump pad too far from tracepoint "
2247 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2248 offset);
2249 return 1;
2250 }
2251
2252 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2253 *jjump_pad_insn_size = 4;
2254
2255 /* Return the end address of our pad. */
2256 *jump_entry = buildaddr;
2257
2258 return 0;
2259 }
2260
2261 /* Helper function writing LEN instructions from START into
2262 current_insn_ptr. */
2263
2264 static void
2265 emit_ops_insns (const uint32_t *start, int len)
2266 {
2267 CORE_ADDR buildaddr = current_insn_ptr;
2268
2269 if (debug_threads)
2270 debug_printf ("Adding %d instructions at %s\n",
2271 len, paddress (buildaddr));
2272
2273 append_insns (&buildaddr, len, start);
2274 current_insn_ptr = buildaddr;
2275 }
2276
2277 /* Pop a register from the stack. */
2278
2279 static int
2280 emit_pop (uint32_t *buf, struct aarch64_register rt)
2281 {
2282 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2283 }
2284
2285 /* Push a register on the stack. */
2286
2287 static int
2288 emit_push (uint32_t *buf, struct aarch64_register rt)
2289 {
2290 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2291 }
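/* Note that each cell of the compiled expression's stack is 16 bytes,
even though only the low 8 bytes hold a value; this keeps the stack
pointer 16-byte aligned, as AArch64 requires when SP is used as a
base register.  */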
2292
2293 /* Implementation of emit_ops method "emit_prologue". */
2294
2295 static void
2296 aarch64_emit_prologue (void)
2297 {
2298 uint32_t buf[16];
2299 uint32_t *p = buf;
2300
2301 /* This function emits a prologue for the following function prototype:
2302
2303 enum eval_result_type f (unsigned char *regs,
2304 ULONGEST *value);
2305
2306 The first argument is a buffer of raw registers. The second
2307 argument points to where the result of evaluating the expression
2308 will be stored; it is set to whatever is on top of the stack at
2309 the end.
2310
2311 The stack set up by the prologue is as follows:
2312
2313 High *------------------------------------------------------*
2314 | LR |
2315 | FP | <- FP
2316 | x1 (ULONGEST *value) |
2317 | x0 (unsigned char *regs) |
2318 Low *------------------------------------------------------*
2319
2320 As we are implementing a stack machine, each opcode can expand the
2321 stack so we never know how far we are from the data saved by this
2322 prologue. In order to be able to refer to value and regs later, we save
2323 the current stack pointer in the frame pointer. This way, it is not
2324 clobbered when calling C functions.
2325
2326 Finally, throughout every operation, we are using register x0 as the
2327 top of the stack, and x1 as a scratch register. */
2328
2329 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2330 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2331 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2332
2333 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
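/* At this point the frame holds, from low to high addresses: x0 at
[SP], x1 at [SP + 8], the saved FP at [SP + 16] and the saved LR at
[SP + 24], with FP pointing at the saved-FP slot.  */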
2334
2335
2336 emit_ops_insns (buf, p - buf);
2337 }
2338
2339 /* Implementation of emit_ops method "emit_epilogue". */
2340
2341 static void
2342 aarch64_emit_epilogue (void)
2343 {
2344 uint32_t buf[16];
2345 uint32_t *p = buf;
2346
2347 /* Store the result of the expression (x0) in *value. */
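/* The prologue saved x1, the value pointer, at FP - 8; load that
pointer back before storing x0 through it.  */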
2348 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2349 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2350 p += emit_str (p, x0, x1, offset_memory_operand (0));
2351
2352 /* Restore the previous state. */
2353 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2354 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2355
2356 /* Return expr_eval_no_error. */
2357 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2358 p += emit_ret (p, lr);
2359
2360 emit_ops_insns (buf, p - buf);
2361 }
2362
2363 /* Implementation of emit_ops method "emit_add". */
2364
2365 static void
2366 aarch64_emit_add (void)
2367 {
2368 uint32_t buf[16];
2369 uint32_t *p = buf;
2370
2371 p += emit_pop (p, x1);
2372 p += emit_add (p, x0, x1, register_operand (x0));
2373
2374 emit_ops_insns (buf, p - buf);
2375 }
2376
2377 /* Implementation of emit_ops method "emit_sub". */
2378
2379 static void
2380 aarch64_emit_sub (void)
2381 {
2382 uint32_t buf[16];
2383 uint32_t *p = buf;
2384
2385 p += emit_pop (p, x1);
2386 p += emit_sub (p, x0, x1, register_operand (x0));
2387
2388 emit_ops_insns (buf, p - buf);
2389 }
2390
2391 /* Implementation of emit_ops method "emit_mul". */
2392
2393 static void
2394 aarch64_emit_mul (void)
2395 {
2396 uint32_t buf[16];
2397 uint32_t *p = buf;
2398
2399 p += emit_pop (p, x1);
2400 p += emit_mul (p, x0, x1, x0);
2401
2402 emit_ops_insns (buf, p - buf);
2403 }
2404
2405 /* Implementation of emit_ops method "emit_lsh". */
2406
2407 static void
2408 aarch64_emit_lsh (void)
2409 {
2410 uint32_t buf[16];
2411 uint32_t *p = buf;
2412
2413 p += emit_pop (p, x1);
2414 p += emit_lslv (p, x0, x1, x0);
2415
2416 emit_ops_insns (buf, p - buf);
2417 }
2418
2419 /* Implementation of emit_ops method "emit_rsh_signed". */
2420
2421 static void
2422 aarch64_emit_rsh_signed (void)
2423 {
2424 uint32_t buf[16];
2425 uint32_t *p = buf;
2426
2427 p += emit_pop (p, x1);
2428 p += emit_asrv (p, x0, x1, x0);
2429
2430 emit_ops_insns (buf, p - buf);
2431 }
2432
2433 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2434
2435 static void
2436 aarch64_emit_rsh_unsigned (void)
2437 {
2438 uint32_t buf[16];
2439 uint32_t *p = buf;
2440
2441 p += emit_pop (p, x1);
2442 p += emit_lsrv (p, x0, x1, x0);
2443
2444 emit_ops_insns (buf, p - buf);
2445 }
2446
2447 /* Implementation of emit_ops method "emit_ext". */
2448
2449 static void
2450 aarch64_emit_ext (int arg)
2451 {
2452 uint32_t buf[16];
2453 uint32_t *p = buf;
2454
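/* Sign-extend the low ARG bits of x0 over the full 64-bit register.  */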
2455 p += emit_sbfx (p, x0, x0, 0, arg);
2456
2457 emit_ops_insns (buf, p - buf);
2458 }
2459
2460 /* Implementation of emit_ops method "emit_log_not". */
2461
2462 static void
2463 aarch64_emit_log_not (void)
2464 {
2465 uint32_t buf[16];
2466 uint32_t *p = buf;
2467
2468 /* If the top of the stack is 0, replace it with 1. Else replace it with
2469 0. */
2470
2471 p += emit_cmp (p, x0, immediate_operand (0));
2472 p += emit_cset (p, x0, EQ);
2473
2474 emit_ops_insns (buf, p - buf);
2475 }
2476
2477 /* Implementation of emit_ops method "emit_bit_and". */
2478
2479 static void
2480 aarch64_emit_bit_and (void)
2481 {
2482 uint32_t buf[16];
2483 uint32_t *p = buf;
2484
2485 p += emit_pop (p, x1);
2486 p += emit_and (p, x0, x0, x1);
2487
2488 emit_ops_insns (buf, p - buf);
2489 }
2490
2491 /* Implementation of emit_ops method "emit_bit_or". */
2492
2493 static void
2494 aarch64_emit_bit_or (void)
2495 {
2496 uint32_t buf[16];
2497 uint32_t *p = buf;
2498
2499 p += emit_pop (p, x1);
2500 p += emit_orr (p, x0, x0, x1);
2501
2502 emit_ops_insns (buf, p - buf);
2503 }
2504
2505 /* Implementation of emit_ops method "emit_bit_xor". */
2506
2507 static void
2508 aarch64_emit_bit_xor (void)
2509 {
2510 uint32_t buf[16];
2511 uint32_t *p = buf;
2512
2513 p += emit_pop (p, x1);
2514 p += emit_eor (p, x0, x0, x1);
2515
2516 emit_ops_insns (buf, p - buf);
2517 }
2518
2519 /* Implementation of emit_ops method "emit_bit_not". */
2520
2521 static void
2522 aarch64_emit_bit_not (void)
2523 {
2524 uint32_t buf[16];
2525 uint32_t *p = buf;
2526
2527 p += emit_mvn (p, x0, x0);
2528
2529 emit_ops_insns (buf, p - buf);
2530 }
2531
2532 /* Implementation of emit_ops method "emit_equal". */
2533
2534 static void
2535 aarch64_emit_equal (void)
2536 {
2537 uint32_t buf[16];
2538 uint32_t *p = buf;
2539
2540 p += emit_pop (p, x1);
2541 p += emit_cmp (p, x0, register_operand (x1));
2542 p += emit_cset (p, x0, EQ);
2543
2544 emit_ops_insns (buf, p - buf);
2545 }
2546
2547 /* Implementation of emit_ops method "emit_less_signed". */
2548
2549 static void
2550 aarch64_emit_less_signed (void)
2551 {
2552 uint32_t buf[16];
2553 uint32_t *p = buf;
2554
2555 p += emit_pop (p, x1);
2556 p += emit_cmp (p, x1, register_operand (x0));
2557 p += emit_cset (p, x0, LT);
2558
2559 emit_ops_insns (buf, p - buf);
2560 }
2561
2562 /* Implementation of emit_ops method "emit_less_unsigned". */
2563
2564 static void
2565 aarch64_emit_less_unsigned (void)
2566 {
2567 uint32_t buf[16];
2568 uint32_t *p = buf;
2569
2570 p += emit_pop (p, x1);
2571 p += emit_cmp (p, x1, register_operand (x0));
2572 p += emit_cset (p, x0, LO);
2573
2574 emit_ops_insns (buf, p - buf);
2575 }
2576
2577 /* Implementation of emit_ops method "emit_ref". */
2578
2579 static void
2580 aarch64_emit_ref (int size)
2581 {
2582 uint32_t buf[16];
2583 uint32_t *p = buf;
2584
2585 switch (size)
2586 {
2587 case 1:
2588 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2589 break;
2590 case 2:
2591 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2592 break;
2593 case 4:
2594 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2595 break;
2596 case 8:
2597 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2598 break;
2599 default:
2600 /* Unknown size, bail on compilation. */
2601 emit_error = 1;
2602 break;
2603 }
2604
2605 emit_ops_insns (buf, p - buf);
2606 }
2607
2608 /* Implementation of emit_ops method "emit_if_goto". */
2609
2610 static void
2611 aarch64_emit_if_goto (int *offset_p, int *size_p)
2612 {
2613 uint32_t buf[16];
2614 uint32_t *p = buf;
2615
2616 /* The Z flag is set or cleared here. */
2617 p += emit_cmp (p, x0, immediate_operand (0));
2618 /* This instruction must not change the Z flag. */
2619 p += emit_pop (p, x0);
2620 /* Branch over the next instruction if x0 == 0. */
2621 p += emit_bcond (p, EQ, 8);
2622
2623 /* The NOP instruction will be patched with an unconditional branch. */
2624 if (offset_p)
2625 *offset_p = (p - buf) * 4;
2626 if (size_p)
2627 *size_p = 4;
2628 p += emit_nop (p);
2629
2630 emit_ops_insns (buf, p - buf);
2631 }
2632
2633 /* Implementation of emit_ops method "emit_goto". */
2634
2635 static void
2636 aarch64_emit_goto (int *offset_p, int *size_p)
2637 {
2638 uint32_t buf[16];
2639 uint32_t *p = buf;
2640
2641 /* The NOP instruction will be patched with an unconditional branch. */
2642 if (offset_p)
2643 *offset_p = 0;
2644 if (size_p)
2645 *size_p = 4;
2646 p += emit_nop (p);
2647
2648 emit_ops_insns (buf, p - buf);
2649 }
2650
2651 /* Implementation of emit_ops method "write_goto_address". */
2652
2653 static void
2654 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2655 {
2656 uint32_t insn;
2657
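/* SIZE is unused here: every AArch64 instruction, including the
unconditional branch written below, is 4 bytes long.  */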
2658 emit_b (&insn, 0, to - from);
2659 append_insns (&from, 1, &insn);
2660 }
2661
2662 /* Implementation of emit_ops method "emit_const". */
2663
2664 static void
2665 aarch64_emit_const (LONGEST num)
2666 {
2667 uint32_t buf[16];
2668 uint32_t *p = buf;
2669
2670 p += emit_mov_addr (p, x0, num);
2671
2672 emit_ops_insns (buf, p - buf);
2673 }
2674
2675 /* Implementation of emit_ops method "emit_call". */
2676
2677 static void
2678 aarch64_emit_call (CORE_ADDR fn)
2679 {
2680 uint32_t buf[16];
2681 uint32_t *p = buf;
2682
2683 p += emit_mov_addr (p, ip0, fn);
2684 p += emit_blr (p, ip0);
2685
2686 emit_ops_insns (buf, p - buf);
2687 }
2688
2689 /* Implementation of emit_ops method "emit_reg". */
2690
2691 static void
2692 aarch64_emit_reg (int reg)
2693 {
2694 uint32_t buf[16];
2695 uint32_t *p = buf;
2696
2697 /* Set x0 to unsigned char *regs. */
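/* The regs buffer pointer was saved by the prologue at FP - 16, the
x0 slot; reload it into x0 and pass the register number in x1.  */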
2698 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2699 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2700 p += emit_mov (p, x1, immediate_operand (reg));
2701
2702 emit_ops_insns (buf, p - buf);
2703
2704 aarch64_emit_call (get_raw_reg_func_addr ());
2705 }
2706
2707 /* Implementation of emit_ops method "emit_pop". */
2708
2709 static void
2710 aarch64_emit_pop (void)
2711 {
2712 uint32_t buf[16];
2713 uint32_t *p = buf;
2714
2715 p += emit_pop (p, x0);
2716
2717 emit_ops_insns (buf, p - buf);
2718 }
2719
2720 /* Implementation of emit_ops method "emit_stack_flush". */
2721
2722 static void
2723 aarch64_emit_stack_flush (void)
2724 {
2725 uint32_t buf[16];
2726 uint32_t *p = buf;
2727
2728 p += emit_push (p, x0);
2729
2730 emit_ops_insns (buf, p - buf);
2731 }
2732
2733 /* Implementation of emit_ops method "emit_zero_ext". */
2734
2735 static void
2736 aarch64_emit_zero_ext (int arg)
2737 {
2738 uint32_t buf[16];
2739 uint32_t *p = buf;
2740
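/* Zero-extend the low ARG bits of x0; the remaining bits are cleared.  */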
2741 p += emit_ubfx (p, x0, x0, 0, arg);
2742
2743 emit_ops_insns (buf, p - buf);
2744 }
2745
2746 /* Implementation of emit_ops method "emit_swap". */
2747
2748 static void
2749 aarch64_emit_swap (void)
2750 {
2751 uint32_t buf[16];
2752 uint32_t *p = buf;
2753
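/* x0 caches the top of the stack and the next element lives at [SP];
exchange the two.  */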
2754 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2755 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2756 p += emit_mov (p, x0, register_operand (x1));
2757
2758 emit_ops_insns (buf, p - buf);
2759 }
2760
2761 /* Implementation of emit_ops method "emit_stack_adjust". */
2762
2763 static void
2764 aarch64_emit_stack_adjust (int n)
2765 {
2766 /* This is not needed with our design. */
2767 uint32_t buf[16];
2768 uint32_t *p = buf;
2769
2770 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2771
2772 emit_ops_insns (buf, p - buf);
2773 }
2774
2775 /* Implementation of emit_ops method "emit_int_call_1". */
2776
2777 static void
2778 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2779 {
2780 uint32_t buf[16];
2781 uint32_t *p = buf;
2782
2783 p += emit_mov (p, x0, immediate_operand (arg1));
2784
2785 emit_ops_insns (buf, p - buf);
2786
2787 aarch64_emit_call (fn);
2788 }
2789
2790 /* Implementation of emit_ops method "emit_void_call_2". */
2791
2792 static void
2793 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2794 {
2795 uint32_t buf[16];
2796 uint32_t *p = buf;
2797
2798 /* Push x0 on the stack. */
2799 aarch64_emit_stack_flush ();
2800
2801 /* Setup arguments for the function call:
2802
2803 x0: arg1
2804 x1: top of the stack
2805
2806 MOV x1, x0
2807 MOV x0, #arg1 */
2808
2809 p += emit_mov (p, x1, register_operand (x0));
2810 p += emit_mov (p, x0, immediate_operand (arg1));
2811
2812 emit_ops_insns (buf, p - buf);
2813
2814 aarch64_emit_call (fn);
2815
2816 /* Restore x0. */
2817 aarch64_emit_pop ();
2818 }
2819
2820 /* Implementation of emit_ops method "emit_eq_goto". */
2821
2822 static void
2823 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2824 {
2825 uint32_t buf[16];
2826 uint32_t *p = buf;
2827
2828 p += emit_pop (p, x1);
2829 p += emit_cmp (p, x1, register_operand (x0));
2830 /* Branch over the next instruction if x0 != x1. */
2831 p += emit_bcond (p, NE, 8);
2832 /* The NOP instruction will be patched with an unconditional branch. */
2833 if (offset_p)
2834 *offset_p = (p - buf) * 4;
2835 if (size_p)
2836 *size_p = 4;
2837 p += emit_nop (p);
2838
2839 emit_ops_insns (buf, p - buf);
2840 }
2841
2842 /* Implementation of emit_ops method "emit_ne_goto". */
2843
2844 static void
2845 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2846 {
2847 uint32_t buf[16];
2848 uint32_t *p = buf;
2849
2850 p += emit_pop (p, x1);
2851 p += emit_cmp (p, x1, register_operand (x0));
2852 /* Branch over the next instruction if x0 == x1. */
2853 p += emit_bcond (p, EQ, 8);
2854 /* The NOP instruction will be patched with an unconditional branch. */
2855 if (offset_p)
2856 *offset_p = (p - buf) * 4;
2857 if (size_p)
2858 *size_p = 4;
2859 p += emit_nop (p);
2860
2861 emit_ops_insns (buf, p - buf);
2862 }
2863
2864 /* Implementation of emit_ops method "emit_lt_goto". */
2865
2866 static void
2867 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2868 {
2869 uint32_t buf[16];
2870 uint32_t *p = buf;
2871
2872 p += emit_pop (p, x1);
2873 p += emit_cmp (p, x1, register_operand (x0));
2874 /* Branch over the next instruction if x0 >= x1. */
2875 p += emit_bcond (p, GE, 8);
2876 /* The NOP instruction will be patched with an unconditional branch. */
2877 if (offset_p)
2878 *offset_p = (p - buf) * 4;
2879 if (size_p)
2880 *size_p = 4;
2881 p += emit_nop (p);
2882
2883 emit_ops_insns (buf, p - buf);
2884 }
2885
2886 /* Implementation of emit_ops method "emit_le_goto". */
2887
2888 static void
2889 aarch64_emit_le_goto (int *offset_p, int *size_p)
2890 {
2891 uint32_t buf[16];
2892 uint32_t *p = buf;
2893
2894 p += emit_pop (p, x1);
2895 p += emit_cmp (p, x1, register_operand (x0));
2896 /* Branch over the next instruction if x0 > x1. */
2897 p += emit_bcond (p, GT, 8);
2898 /* The NOP instruction will be patched with an unconditional branch. */
2899 if (offset_p)
2900 *offset_p = (p - buf) * 4;
2901 if (size_p)
2902 *size_p = 4;
2903 p += emit_nop (p);
2904
2905 emit_ops_insns (buf, p - buf);
2906 }
2907
2908 /* Implementation of emit_ops method "emit_gt_goto". */
2909
2910 static void
2911 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2912 {
2913 uint32_t buf[16];
2914 uint32_t *p = buf;
2915
2916 p += emit_pop (p, x1);
2917 p += emit_cmp (p, x1, register_operand (x0));
2918 /* Branch over the next instruction if x0 <= x1. */
2919 p += emit_bcond (p, LE, 8);
2920 /* The NOP instruction will be patched with an unconditional branch. */
2921 if (offset_p)
2922 *offset_p = (p - buf) * 4;
2923 if (size_p)
2924 *size_p = 4;
2925 p += emit_nop (p);
2926
2927 emit_ops_insns (buf, p - buf);
2928 }
2929
2930 /* Implementation of emit_ops method "emit_ge_got". */
2931
2932 static void
2933 aarch64_emit_ge_got (int *offset_p, int *size_p)
2934 {
2935 uint32_t buf[16];
2936 uint32_t *p = buf;
2937
2938 p += emit_pop (p, x1);
2939 p += emit_cmp (p, x1, register_operand (x0));
2940 /* Branch over the next instruction if x0 < x1. */
2941 p += emit_bcond (p, LT, 8);
2942 /* The NOP instruction will be patched with an unconditional branch. */
2943 if (offset_p)
2944 *offset_p = (p - buf) * 4;
2945 if (size_p)
2946 *size_p = 4;
2947 p += emit_nop (p);
2948
2949 emit_ops_insns (buf, p - buf);
2950 }
2951
2952 static struct emit_ops aarch64_emit_ops_impl =
2953 {
2954 aarch64_emit_prologue,
2955 aarch64_emit_epilogue,
2956 aarch64_emit_add,
2957 aarch64_emit_sub,
2958 aarch64_emit_mul,
2959 aarch64_emit_lsh,
2960 aarch64_emit_rsh_signed,
2961 aarch64_emit_rsh_unsigned,
2962 aarch64_emit_ext,
2963 aarch64_emit_log_not,
2964 aarch64_emit_bit_and,
2965 aarch64_emit_bit_or,
2966 aarch64_emit_bit_xor,
2967 aarch64_emit_bit_not,
2968 aarch64_emit_equal,
2969 aarch64_emit_less_signed,
2970 aarch64_emit_less_unsigned,
2971 aarch64_emit_ref,
2972 aarch64_emit_if_goto,
2973 aarch64_emit_goto,
2974 aarch64_write_goto_address,
2975 aarch64_emit_const,
2976 aarch64_emit_call,
2977 aarch64_emit_reg,
2978 aarch64_emit_pop,
2979 aarch64_emit_stack_flush,
2980 aarch64_emit_zero_ext,
2981 aarch64_emit_swap,
2982 aarch64_emit_stack_adjust,
2983 aarch64_emit_int_call_1,
2984 aarch64_emit_void_call_2,
2985 aarch64_emit_eq_goto,
2986 aarch64_emit_ne_goto,
2987 aarch64_emit_lt_goto,
2988 aarch64_emit_le_goto,
2989 aarch64_emit_gt_goto,
2990 aarch64_emit_ge_got,
2991 };
2992
2993 /* Implementation of linux_target_ops method "emit_ops". */
2994
2995 static struct emit_ops *
2996 aarch64_emit_ops (void)
2997 {
2998 return &aarch64_emit_ops_impl;
2999 }
3000
3001 /* Implementation of linux_target_ops method
3002 "get_min_fast_tracepoint_insn_len". */
3003
3004 static int
3005 aarch64_get_min_fast_tracepoint_insn_len (void)
3006 {
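/* A fast tracepoint only needs a single 4-byte instruction to be
replaced with the branch into the jump pad.  */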
3007 return 4;
3008 }
3009
3010 /* Implementation of linux_target_ops method "supports_range_stepping". */
3011
3012 static int
3013 aarch64_supports_range_stepping (void)
3014 {
3015 return 1;
3016 }
3017
3018 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3019
3020 static const gdb_byte *
3021 aarch64_sw_breakpoint_from_kind (int kind, int *size)
3022 {
3023 if (is_64bit_tdesc ())
3024 {
3025 *size = aarch64_breakpoint_len;
3026 return aarch64_breakpoint;
3027 }
3028 else
3029 return arm_sw_breakpoint_from_kind (kind, size);
3030 }
3031
3032 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3033
3034 static int
3035 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3036 {
3037 if (is_64bit_tdesc ())
3038 return aarch64_breakpoint_len;
3039 else
3040 return arm_breakpoint_kind_from_pc (pcptr);
3041 }
3042
3043 /* Implementation of the linux_target_ops method
3044 "breakpoint_kind_from_current_state". */
3045
3046 static int
3047 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3048 {
3049 if (is_64bit_tdesc ())
3050 return aarch64_breakpoint_len;
3051 else
3052 return arm_breakpoint_kind_from_current_state (pcptr);
3053 }
3054
3055 /* Support for hardware single step. */
3056
3057 static int
3058 aarch64_supports_hardware_single_step (void)
3059 {
3060 return 1;
3061 }
3062
3063 struct linux_target_ops the_low_target =
3064 {
3065 aarch64_arch_setup,
3066 aarch64_regs_info,
3067 NULL, /* cannot_fetch_register */
3068 NULL, /* cannot_store_register */
3069 NULL, /* fetch_register */
3070 aarch64_get_pc,
3071 aarch64_set_pc,
3072 aarch64_breakpoint_kind_from_pc,
3073 aarch64_sw_breakpoint_from_kind,
3074 NULL, /* get_next_pcs */
3075 0, /* decr_pc_after_break */
3076 aarch64_breakpoint_at,
3077 aarch64_supports_z_point_type,
3078 aarch64_insert_point,
3079 aarch64_remove_point,
3080 aarch64_stopped_by_watchpoint,
3081 aarch64_stopped_data_address,
3082 NULL, /* collect_ptrace_register */
3083 NULL, /* supply_ptrace_register */
3084 aarch64_linux_siginfo_fixup,
3085 aarch64_linux_new_process,
3086 aarch64_linux_delete_process,
3087 aarch64_linux_new_thread,
3088 aarch64_linux_delete_thread,
3089 aarch64_linux_new_fork,
3090 aarch64_linux_prepare_to_resume,
3091 NULL, /* process_qsupported */
3092 aarch64_supports_tracepoints,
3093 aarch64_get_thread_area,
3094 aarch64_install_fast_tracepoint_jump_pad,
3095 aarch64_emit_ops,
3096 aarch64_get_min_fast_tracepoint_insn_len,
3097 aarch64_supports_range_stepping,
3098 aarch64_breakpoint_kind_from_current_state,
3099 aarch64_supports_hardware_single_step,
3100 aarch64_get_syscall_trapinfo,
3101 };
3102
3103 /* The linux target ops object. */
3104
3105 linux_process_target *the_linux_target = &the_aarch64_target;
3106
3107 void
3108 initialize_low_arch (void)
3109 {
3110 initialize_low_arch_aarch32 ();
3111
3112 initialize_regsets_info (&aarch64_regsets_info);
3113 initialize_regsets_info (&aarch64_sve_regsets_info);
3114 }