gdbserver/linux-low: turn 'regs_info' into a method
gdbserver/linux-aarch64-low.cc
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 const regs_info *get_regs_info () override;
59
60 protected:
61
62 void low_arch_setup () override;
63 };
64
65 /* The singleton target ops object. */
66
67 static aarch64_target the_aarch64_target;
68
69 /* Per-process arch-specific data we want to keep. */
70
71 struct arch_process_info
72 {
73 /* Hardware breakpoint/watchpoint data.
74 The reason for them to be per-process rather than per-thread is
75 due to the lack of information in the gdbserver environment;
76 gdbserver is not told whether a requested hardware
77 breakpoint/watchpoint is thread specific or not, so it has to set
78 each hw bp/wp for every thread in the current process. The
79 higher level bp/wp management in gdb will resume a thread if a hw
80 bp/wp trap is not expected for it. Since the hw bp/wp setting is
81 the same for each thread, it is reasonable for the data to live here.
82 */
83 struct aarch64_debug_reg_state debug_reg_state;
84 };
85
86 /* Return true if the size of register 0 is 8 bytes. */
87
88 static int
89 is_64bit_tdesc (void)
90 {
91 struct regcache *regcache = get_thread_regcache (current_thread, 0);
92
93 return register_size (regcache->tdesc, 0) == 8;
94 }
95
96 /* Return true if the regcache's target description contains the SVE feature. */
97
98 static bool
99 is_sve_tdesc (void)
100 {
101 struct regcache *regcache = get_thread_regcache (current_thread, 0);
102
103 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
104 }
105
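/* Collect the general purpose registers from REGCACHE into BUF, in the
layout of struct user_pt_regs. */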
106 static void
107 aarch64_fill_gregset (struct regcache *regcache, void *buf)
108 {
109 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
110 int i;
111
112 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
113 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
114 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
115 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
116 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
117 }
118
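/* Supply the general purpose registers in BUF, a struct user_pt_regs,
to REGCACHE. */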
119 static void
120 aarch64_store_gregset (struct regcache *regcache, const void *buf)
121 {
122 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
123 int i;
124
125 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
126 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
127 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
128 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
129 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
130 }
131
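/* Collect the floating-point/SIMD registers from REGCACHE into BUF, in the
layout of struct user_fpsimd_state. */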
132 static void
133 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
134 {
135 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
136 int i;
137
138 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
139 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
140 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
141 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
142 }
143
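/* Supply the floating-point/SIMD registers in BUF, a struct
user_fpsimd_state, to REGCACHE. */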
144 static void
145 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
146 {
147 const struct user_fpsimd_state *regset
148 = (const struct user_fpsimd_state *) buf;
149 int i;
150
151 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
152 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
153 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
154 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
155 }
156
157 /* Store the pauth registers to regcache. */
158
159 static void
160 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
161 {
162 uint64_t *pauth_regset = (uint64_t *) buf;
163 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
164
165 if (pauth_base == 0)
166 return;
167
168 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
169 &pauth_regset[0]);
170 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
171 &pauth_regset[1]);
172 }
173
174 /* Implementation of linux_target_ops method "get_pc". */
175
176 static CORE_ADDR
177 aarch64_get_pc (struct regcache *regcache)
178 {
179 if (register_size (regcache->tdesc, 0) == 8)
180 return linux_get_pc_64bit (regcache);
181 else
182 return linux_get_pc_32bit (regcache);
183 }
184
185 /* Implementation of linux_target_ops method "set_pc". */
186
187 static void
188 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
189 {
190 if (register_size (regcache->tdesc, 0) == 8)
191 linux_set_pc_64bit (regcache, pc);
192 else
193 linux_set_pc_32bit (regcache, pc);
194 }
195
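/* Length in bytes of the AArch64 software breakpoint instruction. */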
196 #define aarch64_breakpoint_len 4
197
198 /* AArch64 BRK software debug mode instruction.
199 This instruction needs to match gdb/aarch64-tdep.c
200 (aarch64_default_breakpoint). */
201 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
202
203 /* Implementation of linux_target_ops method "breakpoint_at". */
204
205 static int
206 aarch64_breakpoint_at (CORE_ADDR where)
207 {
208 if (is_64bit_tdesc ())
209 {
210 gdb_byte insn[aarch64_breakpoint_len];
211
212 the_target->read_memory (where, (unsigned char *) &insn,
213 aarch64_breakpoint_len);
214 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
215 return 1;
216
217 return 0;
218 }
219 else
220 return arm_breakpoint_at (where);
221 }
222
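/* Clear the hardware breakpoint and watchpoint mirrors in STATE. */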
223 static void
224 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
225 {
226 int i;
227
228 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
229 {
230 state->dr_addr_bp[i] = 0;
231 state->dr_ctrl_bp[i] = 0;
232 state->dr_ref_count_bp[i] = 0;
233 }
234
235 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
236 {
237 state->dr_addr_wp[i] = 0;
238 state->dr_ctrl_wp[i] = 0;
239 state->dr_ref_count_wp[i] = 0;
240 }
241 }
242
243 /* Return the pointer to the debug register state structure in the
244 current process' arch-specific data area. */
245
246 struct aarch64_debug_reg_state *
247 aarch64_get_debug_reg_state (pid_t pid)
248 {
249 struct process_info *proc = find_process_pid (pid);
250
251 return &proc->priv->arch_private->debug_reg_state;
252 }
253
254 /* Implementation of linux_target_ops method "supports_z_point_type". */
255
256 static int
257 aarch64_supports_z_point_type (char z_type)
258 {
259 switch (z_type)
260 {
261 case Z_PACKET_SW_BP:
262 case Z_PACKET_HW_BP:
263 case Z_PACKET_WRITE_WP:
264 case Z_PACKET_READ_WP:
265 case Z_PACKET_ACCESS_WP:
266 return 1;
267 default:
268 return 0;
269 }
270 }
271
272 /* Implementation of linux_target_ops method "insert_point".
273
274 It actually only records the info of the to-be-inserted bp/wp;
275 the actual insertion will happen when threads are resumed. */
276
277 static int
278 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
279 int len, struct raw_breakpoint *bp)
280 {
281 int ret;
282 enum target_hw_bp_type targ_type;
283 struct aarch64_debug_reg_state *state
284 = aarch64_get_debug_reg_state (pid_of (current_thread));
285
286 if (show_debug_regs)
287 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
288 (unsigned long) addr, len);
289
290 /* Determine the type from the raw breakpoint type. */
291 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
292
293 if (targ_type != hw_execute)
294 {
295 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
296 ret = aarch64_handle_watchpoint (targ_type, addr, len,
297 1 /* is_insert */, state);
298 else
299 ret = -1;
300 }
301 else
302 {
303 if (len == 3)
304 {
305 /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
306 instruction. Set it to 2 to correctly encode the length bit
307 mask in the hardware breakpoint/watchpoint control register. */
308 len = 2;
309 }
310 ret = aarch64_handle_breakpoint (targ_type, addr, len,
311 1 /* is_insert */, state);
312 }
313
314 if (show_debug_regs)
315 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
316 targ_type);
317
318 return ret;
319 }
320
321 /* Implementation of linux_target_ops method "remove_point".
322
323 It actually only records the info of the to-be-removed bp/wp;
324 the actual removal will be done when threads are resumed. */
325
326 static int
327 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
328 int len, struct raw_breakpoint *bp)
329 {
330 int ret;
331 enum target_hw_bp_type targ_type;
332 struct aarch64_debug_reg_state *state
333 = aarch64_get_debug_reg_state (pid_of (current_thread));
334
335 if (show_debug_regs)
336 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
337 (unsigned long) addr, len);
338
339 /* Determine the type from the raw breakpoint type. */
340 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
341
342 /* Handle watchpoints and hardware breakpoints separately. */
343 if (targ_type != hw_execute)
344 ret =
345 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
346 state);
347 else
348 {
349 if (len == 3)
350 {
351 /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
352 instruction. Set it to 2 to correctly encode the length bit
353 mask in the hardware breakpoint/watchpoint control register. */
354 len = 2;
355 }
356 ret = aarch64_handle_breakpoint (targ_type, addr, len,
357 0 /* is_insert */, state);
358 }
359
360 if (show_debug_regs)
361 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
362 targ_type);
363
364 return ret;
365 }
366
367 /* Implementation of linux_target_ops method "stopped_data_address". */
368
369 static CORE_ADDR
370 aarch64_stopped_data_address (void)
371 {
372 siginfo_t siginfo;
373 int pid, i;
374 struct aarch64_debug_reg_state *state;
375
376 pid = lwpid_of (current_thread);
377
378 /* Get the siginfo. */
379 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
380 return (CORE_ADDR) 0;
381
382 /* Need to be a hardware breakpoint/watchpoint trap. */
383 if (siginfo.si_signo != SIGTRAP
384 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
385 return (CORE_ADDR) 0;
386
387 /* Check if the address matches any watched address. */
388 state = aarch64_get_debug_reg_state (pid_of (current_thread));
389 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
390 {
391 const unsigned int offset
392 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
393 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
394 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
395 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
396 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
397 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
398
399 if (state->dr_ref_count_wp[i]
400 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
401 && addr_trap >= addr_watch_aligned
402 && addr_trap < addr_watch + len)
403 {
404 /* ADDR_TRAP reports the first address of the memory range
405 accessed by the CPU, regardless of what memory range was
406 watched. Thus, a large CPU access that straddles
407 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
408 ADDR_TRAP that is lower than the
409 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
410
411 addr: | 4 | 5 | 6 | 7 | 8 |
412 |---- range watched ----|
413 |----------- range accessed ------------|
414
415 In this case, ADDR_TRAP will be 4.
416
417 To match a watchpoint known to GDB core, we must never
418 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
419 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
420 positive on kernels older than 4.10. See PR
421 external/20207. */
422 return addr_orig;
423 }
424 }
425
426 return (CORE_ADDR) 0;
427 }
428
429 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
430
431 static int
432 aarch64_stopped_by_watchpoint (void)
433 {
434 if (aarch64_stopped_data_address () != 0)
435 return 1;
436 else
437 return 0;
438 }
439
440 /* Fetch the thread-local storage pointer for libthread_db. */
441
442 ps_err_e
443 ps_get_thread_area (struct ps_prochandle *ph,
444 lwpid_t lwpid, int idx, void **base)
445 {
446 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
447 is_64bit_tdesc ());
448 }
449
450 /* Implementation of linux_target_ops method "siginfo_fixup". */
451
452 static int
453 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
454 {
455 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
456 if (!is_64bit_tdesc ())
457 {
458 if (direction == 0)
459 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
460 native);
461 else
462 aarch64_siginfo_from_compat_siginfo (native,
463 (struct compat_siginfo *) inf);
464
465 return 1;
466 }
467
468 return 0;
469 }
470
471 /* Implementation of linux_target_ops method "new_process". */
472
473 static struct arch_process_info *
474 aarch64_linux_new_process (void)
475 {
476 struct arch_process_info *info = XCNEW (struct arch_process_info);
477
478 aarch64_init_debug_reg_state (&info->debug_reg_state);
479
480 return info;
481 }
482
483 /* Implementation of linux_target_ops method "delete_process". */
484
485 static void
486 aarch64_linux_delete_process (struct arch_process_info *info)
487 {
488 xfree (info);
489 }
490
491 /* Implementation of linux_target_ops method "linux_new_fork". */
492
493 static void
494 aarch64_linux_new_fork (struct process_info *parent,
495 struct process_info *child)
496 {
497 /* These are allocated by linux_add_process. */
498 gdb_assert (parent->priv != NULL
499 && parent->priv->arch_private != NULL);
500 gdb_assert (child->priv != NULL
501 && child->priv->arch_private != NULL);
502
503 /* Linux kernels before 2.6.33 commit
504 72f674d203cd230426437cdcf7dd6f681dad8b0d
505 let the child inherit the hardware debug registers from the parent
506 on fork/vfork/clone. Newer Linux kernels create such tasks with
507 zeroed debug registers.
508
509 GDB core assumes the child inherits the watchpoints/hw
510 breakpoints of the parent, and will remove them all from the
511 forked off process. Copy the debug register mirrors into the
512 new process so that all breakpoints and watchpoints can be
513 removed together. The debug register mirrors will be zeroed
514 in the end before detaching the forked off process, thus making
515 this compatible with older Linux kernels too. */
516
517 *child->priv->arch_private = *parent->priv->arch_private;
518 }
519
520 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
521 #define AARCH64_HWCAP_PACA (1 << 30)
522
523 /* Implementation of linux target ops method "low_arch_setup". */
524
525 void
526 aarch64_target::low_arch_setup ()
527 {
528 unsigned int machine;
529 int is_elf64;
530 int tid;
531
532 tid = lwpid_of (current_thread);
533
534 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
535
536 if (is_elf64)
537 {
538 uint64_t vq = aarch64_sve_get_vq (tid);
539 unsigned long hwcap = linux_get_hwcap (8);
540 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
541
542 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
543 }
544 else
545 current_process ()->tdesc = aarch32_linux_read_description ();
546
547 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
548 }
549
550 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
551
552 static void
553 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
554 {
555 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
556 }
557
558 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
559
560 static void
561 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
562 {
563 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
564 }
565
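/* Register sets used for AArch64 targets without SVE. */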
566 static struct regset_info aarch64_regsets[] =
567 {
568 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
569 sizeof (struct user_pt_regs), GENERAL_REGS,
570 aarch64_fill_gregset, aarch64_store_gregset },
571 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
572 sizeof (struct user_fpsimd_state), FP_REGS,
573 aarch64_fill_fpregset, aarch64_store_fpregset
574 },
575 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
576 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
577 NULL, aarch64_store_pauthregset },
578 NULL_REGSET
579 };
580
581 static struct regsets_info aarch64_regsets_info =
582 {
583 aarch64_regsets, /* regsets */
584 0, /* num_regsets */
585 NULL, /* disabled_regsets */
586 };
587
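/* The regs_info returned for non-SVE AArch64 targets. */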
588 static struct regs_info regs_info_aarch64 =
589 {
590 NULL, /* regset_bitmap */
591 NULL, /* usrregs */
592 &aarch64_regsets_info,
593 };
594
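/* Register sets used for AArch64 targets with SVE. */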
595 static struct regset_info aarch64_sve_regsets[] =
596 {
597 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
598 sizeof (struct user_pt_regs), GENERAL_REGS,
599 aarch64_fill_gregset, aarch64_store_gregset },
600 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
601 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
602 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
603 },
604 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
605 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
606 NULL, aarch64_store_pauthregset },
607 NULL_REGSET
608 };
609
610 static struct regsets_info aarch64_sve_regsets_info =
611 {
612 aarch64_sve_regsets, /* regsets. */
613 0, /* num_regsets. */
614 NULL, /* disabled_regsets. */
615 };
616
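/* The regs_info returned for AArch64 targets with SVE. */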
617 static struct regs_info regs_info_aarch64_sve =
618 {
619 NULL, /* regset_bitmap. */
620 NULL, /* usrregs. */
621 &aarch64_sve_regsets_info,
622 };
623
624 /* Implementation of linux target ops method "get_regs_info". */
625
626 const regs_info *
627 aarch64_target::get_regs_info ()
628 {
629 if (!is_64bit_tdesc ())
630 return &regs_info_aarch32;
631
632 if (is_sve_tdesc ())
633 return &regs_info_aarch64_sve;
634
635 return &regs_info_aarch64;
636 }
637
638 /* Implementation of linux_target_ops method "supports_tracepoints". */
639
640 static int
641 aarch64_supports_tracepoints (void)
642 {
643 if (current_thread == NULL)
644 return 1;
645 else
646 {
647 /* We don't support tracepoints on aarch32 now. */
648 return is_64bit_tdesc ();
649 }
650 }
651
652 /* Implementation of linux_target_ops method "get_thread_area". */
653
654 static int
655 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
656 {
657 struct iovec iovec;
658 uint64_t reg;
659
660 iovec.iov_base = &reg;
661 iovec.iov_len = sizeof (reg);
662
663 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
664 return -1;
665
666 *addrp = reg;
667
668 return 0;
669 }
670
671 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
672
673 static void
674 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
675 {
676 int use_64bit = register_size (regcache->tdesc, 0) == 8;
677
678 if (use_64bit)
679 {
680 long l_sysno;
681
682 collect_register_by_name (regcache, "x8", &l_sysno);
683 *sysno = (int) l_sysno;
684 }
685 else
686 collect_register_by_name (regcache, "r7", sysno);
687 }
688
689 /* List of condition codes that we need. */
690
691 enum aarch64_condition_codes
692 {
693 EQ = 0x0,
694 NE = 0x1,
695 LO = 0x3,
696 GE = 0xa,
697 LT = 0xb,
698 GT = 0xc,
699 LE = 0xd,
700 };
701
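/* Kinds of operands accepted by the instruction emitters below. */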
702 enum aarch64_operand_type
703 {
704 OPERAND_IMMEDIATE,
705 OPERAND_REGISTER,
706 };
707
708 /* Representation of an operand. At this time, it only supports register
709 and immediate types. */
710
711 struct aarch64_operand
712 {
713 /* Type of the operand. */
714 enum aarch64_operand_type type;
715
716 /* Value of the operand according to the type. */
717 union
718 {
719 uint32_t imm;
720 struct aarch64_register reg;
721 };
722 };
723
724 /* List of registers that we are currently using; we can add more here as
725 we need to use them. */
726
727 /* General purpose scratch registers (64 bit). */
728 static const struct aarch64_register x0 = { 0, 1 };
729 static const struct aarch64_register x1 = { 1, 1 };
730 static const struct aarch64_register x2 = { 2, 1 };
731 static const struct aarch64_register x3 = { 3, 1 };
732 static const struct aarch64_register x4 = { 4, 1 };
733
734 /* General purpose scratch registers (32 bit). */
735 static const struct aarch64_register w0 = { 0, 0 };
736 static const struct aarch64_register w2 = { 2, 0 };
737
738 /* Intra-procedure scratch registers. */
739 static const struct aarch64_register ip0 = { 16, 1 };
740
741 /* Special purpose registers. */
742 static const struct aarch64_register fp = { 29, 1 };
743 static const struct aarch64_register lr = { 30, 1 };
744 static const struct aarch64_register sp = { 31, 1 };
745 static const struct aarch64_register xzr = { 31, 1 };
746
747 /* Dynamically build a register descriptor. If we know the register
748 statically, we should make it a global as above instead of using this
749 helper function. */
750
751 static struct aarch64_register
752 aarch64_register (unsigned num, int is64)
753 {
754 return (struct aarch64_register) { num, is64 };
755 }
756
757 /* Helper function to create a register operand, for instructions with
758 different types of operands.
759
760 For example:
761 p += emit_mov (p, x0, register_operand (x1)); */
762
763 static struct aarch64_operand
764 register_operand (struct aarch64_register reg)
765 {
766 struct aarch64_operand operand;
767
768 operand.type = OPERAND_REGISTER;
769 operand.reg = reg;
770
771 return operand;
772 }
773
774 /* Helper function to create an immediate operand, for instructions with
775 different types of operands.
776
777 For example:
778 p += emit_mov (p, x0, immediate_operand (12)); */
779
780 static struct aarch64_operand
781 immediate_operand (uint32_t imm)
782 {
783 struct aarch64_operand operand;
784
785 operand.type = OPERAND_IMMEDIATE;
786 operand.imm = imm;
787
788 return operand;
789 }
790
791 /* Helper function to create an offset memory operand.
792
793 For example:
794 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
795
796 static struct aarch64_memory_operand
797 offset_memory_operand (int32_t offset)
798 {
799 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
800 }
801
802 /* Helper function to create a pre-index memory operand.
803
804 For example:
805 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
806
807 static struct aarch64_memory_operand
808 preindex_memory_operand (int32_t index)
809 {
810 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
811 }
812
813 /* Helper function to create a post-index memory operand.
814
815 For example:
816 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
817
818 static struct aarch64_memory_operand
819 postindex_memory_operand (int32_t index)
820 {
821 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
822 }
823
824 /* System control registers. These special registers can be read with
825 the MRS instruction and written with the MSR instruction.
826
827 - NZCV: Condition flags. GDB refers to this register under the CPSR
828 name.
829 - FPSR: Floating-point status register.
830 - FPCR: Floating-point control register.
831 - TPIDR_EL0: Software thread ID register. */
832
833 enum aarch64_system_control_registers
834 {
835 /* op0 op1 crn crm op2 */
836 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
837 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
838 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
839 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
840 };
841
842 /* Write a BLR instruction into *BUF.
843
844 BLR rn
845
846 RN is the register to branch to. */
847
848 static int
849 emit_blr (uint32_t *buf, struct aarch64_register rn)
850 {
851 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
852 }
853
854 /* Write a RET instruction into *BUF.
855
856 RET xn
857
858 RN is the register to branch to. */
859
860 static int
861 emit_ret (uint32_t *buf, struct aarch64_register rn)
862 {
863 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
864 }
865
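/* Helper function emitting a load or store pair instruction. OPCODE is
either LDP or STP, RT and RT2 are the pair of registers, RN is the base
address register and OPERAND selects the addressing mode. */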
866 static int
867 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
868 struct aarch64_register rt,
869 struct aarch64_register rt2,
870 struct aarch64_register rn,
871 struct aarch64_memory_operand operand)
872 {
873 uint32_t opc;
874 uint32_t pre_index;
875 uint32_t write_back;
876
877 if (rt.is64)
878 opc = ENCODE (2, 2, 30);
879 else
880 opc = ENCODE (0, 2, 30);
881
882 switch (operand.type)
883 {
884 case MEMORY_OPERAND_OFFSET:
885 {
886 pre_index = ENCODE (1, 1, 24);
887 write_back = ENCODE (0, 1, 23);
888 break;
889 }
890 case MEMORY_OPERAND_POSTINDEX:
891 {
892 pre_index = ENCODE (0, 1, 24);
893 write_back = ENCODE (1, 1, 23);
894 break;
895 }
896 case MEMORY_OPERAND_PREINDEX:
897 {
898 pre_index = ENCODE (1, 1, 24);
899 write_back = ENCODE (1, 1, 23);
900 break;
901 }
902 default:
903 return 0;
904 }
905
906 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
907 | ENCODE (operand.index >> 3, 7, 15)
908 | ENCODE (rt2.num, 5, 10)
909 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
910 }
911
912 /* Write a STP instruction into *BUF.
913
914 STP rt, rt2, [rn, #offset]
915 STP rt, rt2, [rn, #index]!
916 STP rt, rt2, [rn], #index
917
918 RT and RT2 are the registers to store.
919 RN is the base address register.
920 OFFSET is the immediate to add to the base address. It is limited to a
921 -512 .. 504 range (7 bits << 3). */
922
923 static int
924 emit_stp (uint32_t *buf, struct aarch64_register rt,
925 struct aarch64_register rt2, struct aarch64_register rn,
926 struct aarch64_memory_operand operand)
927 {
928 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
929 }
930
931 /* Write a LDP instruction into *BUF.
932
933 LDP rt, rt2, [rn, #offset]
934 LDP rt, rt2, [rn, #index]!
935 LDP rt, rt2, [rn], #index
936
937 RT and RT2 are the destination registers.
938 RN is the base address register.
939 OFFSET is the immediate to add to the base address. It is limited to a
940 -512 .. 504 range (7 bits << 3). */
941
942 static int
943 emit_ldp (uint32_t *buf, struct aarch64_register rt,
944 struct aarch64_register rt2, struct aarch64_register rn,
945 struct aarch64_memory_operand operand)
946 {
947 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
948 }
949
950 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
951
952 LDP qt, qt2, [rn, #offset]
953
954 RT and RT2 are the destination Q registers.
955 RN is the base address register.
956 OFFSET is the immediate to add to the base address. It is limited to
957 -1024 .. 1008 range (7 bits << 4). */
958
959 static int
960 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
961 struct aarch64_register rn, int32_t offset)
962 {
963 uint32_t opc = ENCODE (2, 2, 30);
964 uint32_t pre_index = ENCODE (1, 1, 24);
965
966 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
967 | ENCODE (offset >> 4, 7, 15)
968 | ENCODE (rt2, 5, 10)
969 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
970 }
971
972 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
973
974 STP qt, qt2, [rn, #offset]
975
976 RT and RT2 are the Q registers to store.
977 RN is the base address register.
978 OFFSET is the immediate to add to the base address. It is limited to
979 -1024 .. 1008 range (7 bits << 4). */
980
981 static int
982 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
983 struct aarch64_register rn, int32_t offset)
984 {
985 uint32_t opc = ENCODE (2, 2, 30);
986 uint32_t pre_index = ENCODE (1, 1, 24);
987
988 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
989 | ENCODE (offset >> 4, 7, 15)
990 | ENCODE (rt2, 5, 10)
991 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
992 }
993
994 /* Write a LDRH instruction into *BUF.
995
996 LDRH wt, [xn, #offset]
997 LDRH wt, [xn, #index]!
998 LDRH wt, [xn], #index
999
1000 RT is the destination register.
1001 RN is the base address register.
1002 OFFSET is the immediate to add to the base address. It is limited to
1003 0 .. 8190 range (12 bits << 1). */
1004
1005 static int
1006 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1007 struct aarch64_register rn,
1008 struct aarch64_memory_operand operand)
1009 {
1010 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1011 }
1012
1013 /* Write a LDRB instruction into *BUF.
1014
1015 LDRB wt, [xn, #offset]
1016 LDRB wt, [xn, #index]!
1017 LDRB wt, [xn], #index
1018
1019 RT is the destination register.
1020 RN is the base address register.
1021 OFFSET is the immediate to add to the base address. It is limited to
1022 0 .. 4095 range (12 bits). */
1023
1024 static int
1025 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1026 struct aarch64_register rn,
1027 struct aarch64_memory_operand operand)
1028 {
1029 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1030 }
1031
1032
1033
1034 /* Write a STR instruction into *BUF.
1035
1036 STR rt, [rn, #offset]
1037 STR rt, [rn, #index]!
1038 STR rt, [rn], #index
1039
1040 RT is the register to store.
1041 RN is the base address register.
1042 OFFSET is the immediate to add to the base address. It is limited to
1043 0 .. 32760 range (12 bits << 3). */
1044
1045 static int
1046 emit_str (uint32_t *buf, struct aarch64_register rt,
1047 struct aarch64_register rn,
1048 struct aarch64_memory_operand operand)
1049 {
1050 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1051 }
1052
1053 /* Helper function emitting an exclusive load or store instruction. */
1054
1055 static int
1056 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1057 enum aarch64_opcodes opcode,
1058 struct aarch64_register rs,
1059 struct aarch64_register rt,
1060 struct aarch64_register rt2,
1061 struct aarch64_register rn)
1062 {
1063 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1064 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1065 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1066 }
1067
1068 /* Write a LDAXR instruction into *BUF.
1069
1070 LDAXR rt, [xn]
1071
1072 RT is the destination register.
1073 RN is the base address register. */
1074
1075 static int
1076 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1077 struct aarch64_register rn)
1078 {
1079 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1080 xzr, rn);
1081 }
1082
1083 /* Write a STXR instruction into *BUF.
1084
1085 STXR ws, rt, [xn]
1086
1087 RS is the status register; it indicates whether the store succeeded.
1088 RT is the destination register.
1089 RN is the base address register. */
1090
1091 static int
1092 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1093 struct aarch64_register rt, struct aarch64_register rn)
1094 {
1095 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1096 xzr, rn);
1097 }
1098
1099 /* Write a STLR instruction into *BUF.
1100
1101 STLR rt, [xn]
1102
1103 RT is the register to store.
1104 RN is the base address register. */
1105
1106 static int
1107 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1108 struct aarch64_register rn)
1109 {
1110 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1111 xzr, rn);
1112 }
1113
1114 /* Helper function for data processing instructions with register sources. */
1115
1116 static int
1117 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1118 struct aarch64_register rd,
1119 struct aarch64_register rn,
1120 struct aarch64_register rm)
1121 {
1122 uint32_t size = ENCODE (rd.is64, 1, 31);
1123
1124 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1125 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1126 }
1127
1128 /* Helper function for data processing instructions taking either a register
1129 or an immediate. */
1130
1131 static int
1132 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1133 struct aarch64_register rd,
1134 struct aarch64_register rn,
1135 struct aarch64_operand operand)
1136 {
1137 uint32_t size = ENCODE (rd.is64, 1, 31);
1138 /* The opcode is different for register and immediate source operands. */
1139 uint32_t operand_opcode;
1140
1141 if (operand.type == OPERAND_IMMEDIATE)
1142 {
1143 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1144 operand_opcode = ENCODE (8, 4, 25);
1145
1146 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1147 | ENCODE (operand.imm, 12, 10)
1148 | ENCODE (rn.num, 5, 5)
1149 | ENCODE (rd.num, 5, 0));
1150 }
1151 else
1152 {
1153 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1154 operand_opcode = ENCODE (5, 4, 25);
1155
1156 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1157 rn, operand.reg);
1158 }
1159 }
1160
1161 /* Write an ADD instruction into *BUF.
1162
1163 ADD rd, rn, #imm
1164 ADD rd, rn, rm
1165
1166 This function handles both an immediate and register add.
1167
1168 RD is the destination register.
1169 RN is the input register.
1170 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1171 OPERAND_REGISTER. */
1172
1173 static int
1174 emit_add (uint32_t *buf, struct aarch64_register rd,
1175 struct aarch64_register rn, struct aarch64_operand operand)
1176 {
1177 return emit_data_processing (buf, ADD, rd, rn, operand);
1178 }
1179
1180 /* Write a SUB instruction into *BUF.
1181
1182 SUB rd, rn, #imm
1183 SUB rd, rn, rm
1184
1185 This function handles both an immediate and register sub.
1186
1187 RD is the destination register.
1188 RN is the input register.
1189 OPERAND is the source operand (OPERAND_IMMEDIATE or OPERAND_REGISTER). */
1190
1191 static int
1192 emit_sub (uint32_t *buf, struct aarch64_register rd,
1193 struct aarch64_register rn, struct aarch64_operand operand)
1194 {
1195 return emit_data_processing (buf, SUB, rd, rn, operand);
1196 }
1197
1198 /* Write a MOV instruction into *BUF.
1199
1200 MOV rd, #imm
1201 MOV rd, rm
1202
1203 This function handles both a wide immediate move and a register move,
1204 with the condition that the source register is not xzr. xzr and the
1205 stack pointer share the same encoding and this function only supports
1206 the stack pointer.
1207
1208 RD is the destination register.
1209 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1210 OPERAND_REGISTER. */
1211
1212 static int
1213 emit_mov (uint32_t *buf, struct aarch64_register rd,
1214 struct aarch64_operand operand)
1215 {
1216 if (operand.type == OPERAND_IMMEDIATE)
1217 {
1218 uint32_t size = ENCODE (rd.is64, 1, 31);
1219 /* Do not shift the immediate. */
1220 uint32_t shift = ENCODE (0, 2, 21);
1221
1222 return aarch64_emit_insn (buf, MOV | size | shift
1223 | ENCODE (operand.imm, 16, 5)
1224 | ENCODE (rd.num, 5, 0));
1225 }
1226 else
1227 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1228 }
1229
1230 /* Write a MOVK instruction into *BUF.
1231
1232 MOVK rd, #imm, lsl #shift
1233
1234 RD is the destination register.
1235 IMM is the immediate.
1236 SHIFT is the logical shift left to apply to IMM. */
1237
1238 static int
1239 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1240 unsigned shift)
1241 {
1242 uint32_t size = ENCODE (rd.is64, 1, 31);
1243
1244 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1245 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1246 }
1247
1248 /* Write instructions into *BUF in order to move ADDR into a register.
1249 ADDR can be a 64-bit value.
1250
1251 This function will emit a series of MOV and MOVK instructions, such as:
1252
1253 MOV xd, #(addr)
1254 MOVK xd, #(addr >> 16), lsl #16
1255 MOVK xd, #(addr >> 32), lsl #32
1256 MOVK xd, #(addr >> 48), lsl #48 */
1257
1258 static int
1259 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1260 {
1261 uint32_t *p = buf;
1262
1263 /* The MOV (wide immediate) instruction clears the top bits of the
1264 register. */
1265 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1266
1267 if ((addr >> 16) != 0)
1268 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1269 else
1270 return p - buf;
1271
1272 if ((addr >> 32) != 0)
1273 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1274 else
1275 return p - buf;
1276
1277 if ((addr >> 48) != 0)
1278 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1279
1280 return p - buf;
1281 }
1282
1283 /* Write a SUBS instruction into *BUF.
1284
1285 SUBS rd, rn, rm
1286
1287 This instruction updates the condition flags.
1288
1289 RD is the destination register.
1290 RN and RM are the source registers. */
1291
1292 static int
1293 emit_subs (uint32_t *buf, struct aarch64_register rd,
1294 struct aarch64_register rn, struct aarch64_operand operand)
1295 {
1296 return emit_data_processing (buf, SUBS, rd, rn, operand);
1297 }
1298
1299 /* Write a CMP instruction into *BUF.
1300
1301 CMP rn, rm
1302
1303 This instruction is an alias of SUBS xzr, rn, rm.
1304
1305 RN and RM are the registers to compare. */
1306
1307 static int
1308 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1309 struct aarch64_operand operand)
1310 {
1311 return emit_subs (buf, xzr, rn, operand);
1312 }
1313
1314 /* Write an AND instruction into *BUF.
1315
1316 AND rd, rn, rm
1317
1318 RD is the destination register.
1319 RN and RM are the source registers. */
1320
1321 static int
1322 emit_and (uint32_t *buf, struct aarch64_register rd,
1323 struct aarch64_register rn, struct aarch64_register rm)
1324 {
1325 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1326 }
1327
1328 /* Write an ORR instruction into *BUF.
1329
1330 ORR rd, rn, rm
1331
1332 RD is the destination register.
1333 RN and RM are the source registers. */
1334
1335 static int
1336 emit_orr (uint32_t *buf, struct aarch64_register rd,
1337 struct aarch64_register rn, struct aarch64_register rm)
1338 {
1339 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1340 }
1341
1342 /* Write an ORN instruction into *BUF.
1343
1344 ORN rd, rn, rm
1345
1346 RD is the destination register.
1347 RN and RM are the source registers. */
1348
1349 static int
1350 emit_orn (uint32_t *buf, struct aarch64_register rd,
1351 struct aarch64_register rn, struct aarch64_register rm)
1352 {
1353 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1354 }
1355
1356 /* Write an EOR instruction into *BUF.
1357
1358 EOR rd, rn, rm
1359
1360 RD is the destination register.
1361 RN and RM are the source registers. */
1362
1363 static int
1364 emit_eor (uint32_t *buf, struct aarch64_register rd,
1365 struct aarch64_register rn, struct aarch64_register rm)
1366 {
1367 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1368 }
1369
1370 /* Write a MVN instruction into *BUF.
1371
1372 MVN rd, rm
1373
1374 This is an alias for ORN rd, xzr, rm.
1375
1376 RD is the destination register.
1377 RM is the source register. */
1378
1379 static int
1380 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1381 struct aarch64_register rm)
1382 {
1383 return emit_orn (buf, rd, xzr, rm);
1384 }
1385
1386 /* Write a LSLV instruction into *BUF.
1387
1388 LSLV rd, rn, rm
1389
1390 RD is the destination register.
1391 RN and RM are the source registers. */
1392
1393 static int
1394 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1395 struct aarch64_register rn, struct aarch64_register rm)
1396 {
1397 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1398 }
1399
1400 /* Write a LSRV instruction into *BUF.
1401
1402 LSRV rd, rn, rm
1403
1404 RD is the destination register.
1405 RN and RM are the source registers. */
1406
1407 static int
1408 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1409 struct aarch64_register rn, struct aarch64_register rm)
1410 {
1411 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1412 }
1413
1414 /* Write an ASRV instruction into *BUF.
1415
1416 ASRV rd, rn, rm
1417
1418 RD is the destination register.
1419 RN and RM are the source registers. */
1420
1421 static int
1422 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1423 struct aarch64_register rn, struct aarch64_register rm)
1424 {
1425 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1426 }
1427
1428 /* Write a MUL instruction into *BUF.
1429
1430 MUL rd, rn, rm
1431
1432 RD is the destination register.
1433 RN and RM are the source registers. */
1434
1435 static int
1436 emit_mul (uint32_t *buf, struct aarch64_register rd,
1437 struct aarch64_register rn, struct aarch64_register rm)
1438 {
1439 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1440 }
1441
1442 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1443
1444 MRS xt, system_reg
1445
1446 RT is the destination register.
1447 SYSTEM_REG is the special purpose register to read. */
1448
1449 static int
1450 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1451 enum aarch64_system_control_registers system_reg)
1452 {
1453 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1454 | ENCODE (rt.num, 5, 0));
1455 }
1456
1457 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1458
1459 MSR system_reg, xt
1460
1461 SYSTEM_REG is the special purpose register to write.
1462 RT is the input register. */
1463
1464 static int
1465 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1466 struct aarch64_register rt)
1467 {
1468 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1469 | ENCODE (rt.num, 5, 0));
1470 }
1471
1472 /* Write a SEVL instruction into *BUF.
1473
1474 This is a hint instruction telling the hardware to trigger an event. */
1475
1476 static int
1477 emit_sevl (uint32_t *buf)
1478 {
1479 return aarch64_emit_insn (buf, SEVL);
1480 }
1481
1482 /* Write a WFE instruction into *BUF.
1483
1484 This is a hint instruction telling the hardware to wait for an event. */
1485
1486 static int
1487 emit_wfe (uint32_t *buf)
1488 {
1489 return aarch64_emit_insn (buf, WFE);
1490 }
1491
1492 /* Write a SBFM instruction into *BUF.
1493
1494 SBFM rd, rn, #immr, #imms
1495
1496 This instruction moves the bits from #immr to #imms into the
1497 destination, sign extending the result.
1498
1499 RD is the destination register.
1500 RN is the source register.
1501 IMMR is the bit number to start at (least significant bit).
1502 IMMS is the bit number to stop at (most significant bit). */
1503
1504 static int
1505 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1506 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1507 {
1508 uint32_t size = ENCODE (rd.is64, 1, 31);
1509 uint32_t n = ENCODE (rd.is64, 1, 22);
1510
1511 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1512 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1513 | ENCODE (rd.num, 5, 0));
1514 }
1515
1516 /* Write a SBFX instruction into *BUF.
1517
1518 SBFX rd, rn, #lsb, #width
1519
1520 This instruction moves #width bits from #lsb into the destination, sign
1521 extending the result. This is an alias for:
1522
1523 SBFM rd, rn, #lsb, #(lsb + width - 1)
1524
1525 RD is the destination register.
1526 RN is the source register.
1527 LSB is the bit number to start at (least significant bit).
1528 WIDTH is the number of bits to move. */
1529
1530 static int
1531 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1532 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1533 {
1534 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1535 }
1536
1537 /* Write a UBFM instruction into *BUF.
1538
1539 UBFM rd, rn, #immr, #imms
1540
1541 This instruction moves the bits from #immr to #imms into the
1542 destination, extending the result with zeros.
1543
1544 RD is the destination register.
1545 RN is the source register.
1546 IMMR is the bit number to start at (least significant bit).
1547 IMMS is the bit number to stop at (most significant bit). */
1548
1549 static int
1550 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1551 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1552 {
1553 uint32_t size = ENCODE (rd.is64, 1, 31);
1554 uint32_t n = ENCODE (rd.is64, 1, 22);
1555
1556 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1557 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1558 | ENCODE (rd.num, 5, 0));
1559 }
1560
1561 /* Write a UBFX instruction into *BUF.
1562
1563 UBFX rd, rn, #lsb, #width
1564
1565 This instruction moves #width bits from #lsb into the destination,
1566 extending the result with zeros. This is an alias for:
1567
1568 UBFM rd, rn, #lsb, #(lsb + width - 1)
1569
1570 RD is the destination register.
1571 RN is the source register.
1572 LSB is the bit number to start at (least significant bit).
1573 WIDTH is the number of bits to move. */
1574
1575 static int
1576 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1577 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1578 {
1579 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1580 }
1581
1582 /* Write a CSINC instruction into *BUF.
1583
1584 CSINC rd, rn, rm, cond
1585
1586 This instruction places RN in RD if the condition is true, otherwise
1587 it places RM + 1 in RD.
1588
1589 RD is the destination register.
1590 RN and RM are the source registers.
1591 COND is the encoded condition. */
1592
1593 static int
1594 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1595 struct aarch64_register rn, struct aarch64_register rm,
1596 unsigned cond)
1597 {
1598 uint32_t size = ENCODE (rd.is64, 1, 31);
1599
1600 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1601 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1602 | ENCODE (rd.num, 5, 0));
1603 }
1604
1605 /* Write a CSET instruction into *BUF.
1606
1607 CSET rd, cond
1608
1609 This instruction conditionally writes 1 or 0 in the destination register.
1610 1 is written if the condition is true. This is an alias for:
1611
1612 CSINC rd, xzr, xzr, !cond
1613
1614 Note that the condition needs to be inverted.
1615
1616 RD is the destination register.
1618 COND is the encoded condition. */
1619
1620 static int
1621 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1622 {
1623 /* The least significant bit of the condition needs toggling in order to
1624 invert it. */
1625 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1626 }
1627
1628 /* Write LEN instructions from BUF into the inferior memory at *TO.
1629
1630 Note that instructions are always little endian on AArch64, unlike data. */
1631
1632 static void
1633 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1634 {
1635 size_t byte_len = len * sizeof (uint32_t);
1636 #if (__BYTE_ORDER == __BIG_ENDIAN)
1637 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1638 size_t i;
1639
1640 for (i = 0; i < len; i++)
1641 le_buf[i] = htole32 (buf[i]);
1642
1643 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1644
1645 xfree (le_buf);
1646 #else
1647 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1648 #endif
1649
1650 *to += byte_len;
1651 }
1652
1653 /* Sub-class of struct aarch64_insn_data, storing the information needed
1654 to relocate an instruction for a fast tracepoint. The visitor
1655 relocates an instruction from BASE.INSN_ADDR to NEW_ADDR and saves
1656 the relocated instructions in the buffer pointed to by INSN_PTR. */
1657
1658 struct aarch64_insn_relocation_data
1659 {
1660 struct aarch64_insn_data base;
1661
1662 /* The new address the instruction is relocated to. */
1663 CORE_ADDR new_addr;
1664 /* Pointer to the buffer of relocated instruction(s). */
1665 uint32_t *insn_ptr;
1666 };
1667
1668 /* Implementation of aarch64_insn_visitor method "b". */
1669
1670 static void
1671 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1672 struct aarch64_insn_data *data)
1673 {
1674 struct aarch64_insn_relocation_data *insn_reloc
1675 = (struct aarch64_insn_relocation_data *) data;
1676 int64_t new_offset
1677 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1678
1679 if (can_encode_int32 (new_offset, 28))
1680 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1681 }
1682
1683 /* Implementation of aarch64_insn_visitor method "b_cond". */
1684
1685 static void
1686 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1687 struct aarch64_insn_data *data)
1688 {
1689 struct aarch64_insn_relocation_data *insn_reloc
1690 = (struct aarch64_insn_relocation_data *) data;
1691 int64_t new_offset
1692 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1693
1694 if (can_encode_int32 (new_offset, 21))
1695 {
1696 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1697 new_offset);
1698 }
1699 else if (can_encode_int32 (new_offset, 28))
1700 {
1701 /* The offset is out of range for a conditional branch
1702 instruction but not for an unconditional branch. We can use
1703 the following instructions instead:
1704
1705 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1706 B NOT_TAKEN ; Else jump over TAKEN and continue.
1707 TAKEN:
1708 B #(offset - 8)
1709 NOT_TAKEN:
1710
1711 */
1712
1713 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1714 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1715 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1716 }
1717 }
1718
1719 /* Implementation of aarch64_insn_visitor method "cb". */
1720
1721 static void
1722 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1723 const unsigned rn, int is64,
1724 struct aarch64_insn_data *data)
1725 {
1726 struct aarch64_insn_relocation_data *insn_reloc
1727 = (struct aarch64_insn_relocation_data *) data;
1728 int64_t new_offset
1729 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1730
1731 if (can_encode_int32 (new_offset, 21))
1732 {
1733 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1734 aarch64_register (rn, is64), new_offset);
1735 }
1736 else if (can_encode_int32 (new_offset, 28))
1737 {
1738 /* The offset is out of range for a compare and branch
1739 instruction but not for an unconditional branch. We can use
1740 the following instructions instead:
1741
1742 CBZ xn, TAKEN ; If xn == 0, then jump to TAKEN.
1743 B NOT_TAKEN ; Else jump over TAKEN and continue.
1744 TAKEN:
1745 B #(offset - 8)
1746 NOT_TAKEN:
1747
1748 */
1749 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1750 aarch64_register (rn, is64), 8);
1751 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1752 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1753 }
1754 }
1755
1756 /* Implementation of aarch64_insn_visitor method "tb". */
1757
1758 static void
1759 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1760 const unsigned rt, unsigned bit,
1761 struct aarch64_insn_data *data)
1762 {
1763 struct aarch64_insn_relocation_data *insn_reloc
1764 = (struct aarch64_insn_relocation_data *) data;
1765 int64_t new_offset
1766 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1767
1768 if (can_encode_int32 (new_offset, 16))
1769 {
1770 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1771 aarch64_register (rt, 1), new_offset);
1772 }
1773 else if (can_encode_int32 (new_offset, 28))
1774 {
1775 /* The offset is out of range for a test bit and branch
1776 instruction but not for an unconditional branch. We can use
1777 the following instructions instead:
1778
1779 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
1780 B NOT_TAKEN ; Else jump over TAKEN and continue.
1781 TAKEN:
1782 B #(offset - 8)
1783 NOT_TAKEN:
1784
1785 */
1786 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1787 aarch64_register (rt, 1), 8);
1788 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1789 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1790 new_offset - 8);
1791 }
1792 }
1793
1794 /* Implementation of aarch64_insn_visitor method "adr". */
1795
1796 static void
1797 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1798 const int is_adrp,
1799 struct aarch64_insn_data *data)
1800 {
1801 struct aarch64_insn_relocation_data *insn_reloc
1802 = (struct aarch64_insn_relocation_data *) data;
1803 /* We know exactly the address the ADR{P,} instruction will compute.
1804 We can just write it to the destination register. */
1805 CORE_ADDR address = data->insn_addr + offset;
1806
1807 if (is_adrp)
1808 {
1809 /* Clear the lower 12 bits of the offset to get the 4K page. */
1810 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1811 aarch64_register (rd, 1),
1812 address & ~0xfff);
1813 }
1814 else
1815 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1816 aarch64_register (rd, 1), address);
1817 }
1818
1819 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1820
1821 static void
1822 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1823 const unsigned rt, const int is64,
1824 struct aarch64_insn_data *data)
1825 {
1826 struct aarch64_insn_relocation_data *insn_reloc
1827 = (struct aarch64_insn_relocation_data *) data;
1828 CORE_ADDR address = data->insn_addr + offset;
1829
1830 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1831 aarch64_register (rt, 1), address);
1832
1833 /* We know exactly what address to load from, and what register we
1834 can use:
1835
1836 MOV xd, #(oldloc + offset)
1837 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1838 ...
1839
1840 LDR xd, [xd] ; or LDRSW xd, [xd]
1841
1842 */
1843
1844 if (is_sw)
1845 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1846 aarch64_register (rt, 1),
1847 aarch64_register (rt, 1),
1848 offset_memory_operand (0));
1849 else
1850 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1851 aarch64_register (rt, is64),
1852 aarch64_register (rt, 1),
1853 offset_memory_operand (0));
1854 }
1855
1856 /* Implementation of aarch64_insn_visitor method "others". */
1857
1858 static void
1859 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1860 struct aarch64_insn_data *data)
1861 {
1862 struct aarch64_insn_relocation_data *insn_reloc
1863 = (struct aarch64_insn_relocation_data *) data;
1864
1865 /* The instruction is not PC relative. Just re-emit it at the new
1866 location. */
1867 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1868 }
1869
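/* Visitor used to relocate instructions when building the fast tracepoint
jump pad. */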
1870 static const struct aarch64_insn_visitor visitor =
1871 {
1872 aarch64_ftrace_insn_reloc_b,
1873 aarch64_ftrace_insn_reloc_b_cond,
1874 aarch64_ftrace_insn_reloc_cb,
1875 aarch64_ftrace_insn_reloc_tb,
1876 aarch64_ftrace_insn_reloc_adr,
1877 aarch64_ftrace_insn_reloc_ldr_literal,
1878 aarch64_ftrace_insn_reloc_others,
1879 };
1880
1881 /* Implementation of linux_target_ops method
1882 "install_fast_tracepoint_jump_pad". */
1883
1884 static int
1885 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1886 CORE_ADDR tpaddr,
1887 CORE_ADDR collector,
1888 CORE_ADDR lockaddr,
1889 ULONGEST orig_size,
1890 CORE_ADDR *jump_entry,
1891 CORE_ADDR *trampoline,
1892 ULONGEST *trampoline_size,
1893 unsigned char *jjump_pad_insn,
1894 ULONGEST *jjump_pad_insn_size,
1895 CORE_ADDR *adjusted_insn_addr,
1896 CORE_ADDR *adjusted_insn_addr_end,
1897 char *err)
1898 {
1899 uint32_t buf[256];
1900 uint32_t *p = buf;
1901 int64_t offset;
1902 int i;
1903 uint32_t insn;
1904 CORE_ADDR buildaddr = *jump_entry;
1905 struct aarch64_insn_relocation_data insn_data;
1906
1907 /* We need to save the current state on the stack both to restore it
1908 later and to collect register values when the tracepoint is hit.
1909
1910 The saved registers are pushed in a layout that needs to be in sync
1911 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1912 the supply_fast_tracepoint_registers function will fill in the
1913 register cache from a pointer to saved registers on the stack we build
1914 here.
1915
1916 For simplicity, we set the size of each cell on the stack to 16 bytes.
1917 This way one cell can hold any register type, from system registers
1918 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
1919 has to be 16-byte aligned anyway.
1920
1921 Note that the CPSR register does not exist on AArch64. Instead we
1922 can access system bits describing the process state with the
1923 MRS/MSR instructions, namely the condition flags. We save them as
1924 if they are part of a CPSR register because that's how GDB
1925 interprets these system bits. At the moment, only the condition
1926 flags are saved in CPSR (NZCV).
1927
1928 Stack layout, each cell is 16 bytes (descending):
1929
1930 High *-------- SIMD&FP registers from 31 down to 0. --------*
1931 | q31 |
1932 . .
1933 . . 32 cells
1934 . .
1935 | q0 |
1936 *---- General purpose registers from 30 down to 0. ----*
1937 | x30 |
1938 . .
1939 . . 31 cells
1940 . .
1941 | x0 |
1942 *------------- Special purpose registers. -------------*
1943 | SP |
1944 | PC |
1945 | CPSR (NZCV) | 5 cells
1946 | FPSR |
1947 | FPCR | <- SP + 16
1948 *------------- collecting_t object --------------------*
1949 | TPIDR_EL0 | struct tracepoint * |
1950 Low *------------------------------------------------------*
1951
1952 After this stack is set up, we issue a call to the collector, passing
1953 it the saved registers at (SP + 16). */
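/* Editorial aside (not in the original source): the layout above adds up to
   (32 + 31 + 5) * 16 = 1088 bytes of register cells plus 16 bytes for the
   collecting_t object, i.e. a 1104-byte frame, and every SP adjustment
   below moves by a multiple of 16, so the stack stays 16-byte aligned. */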
1954
1955 /* Push SIMD&FP registers on the stack:
1956
1957 SUB sp, sp, #(32 * 16)
1958
1959 STP q30, q31, [sp, #(30 * 16)]
1960 ...
1961 STP q0, q1, [sp]
1962
1963 */
1964 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1965 for (i = 30; i >= 0; i -= 2)
1966 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1967
1968 /* Push general purpose registers on the stack. Note that we do not need
1969 to push x31 as it represents the xzr register and not the stack
1970 pointer in a STR instruction.
1971
1972 SUB sp, sp, #(31 * 16)
1973
1974 STR x30, [sp, #(30 * 16)]
1975 ...
1976 STR x0, [sp]
1977
1978 */
1979 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1980 for (i = 30; i >= 0; i -= 1)
1981 p += emit_str (p, aarch64_register (i, 1), sp,
1982 offset_memory_operand (i * 16));
1983
1984 /* Make space for 5 more cells.
1985
1986 SUB sp, sp, #(5 * 16)
1987
1988 */
1989 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1990
1991
1992 /* Save SP:
1993
1994 ADD x4, sp, #((32 + 31 + 5) * 16)
1995 STR x4, [sp, #(4 * 16)]
1996
1997 */
1998 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1999 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2000
2001 /* Save PC (tracepoint address):
2002
2003 MOV x3, #(tpaddr)
2004 ...
2005
2006 STR x3, [sp, #(3 * 16)]
2007
2008 */
2009
2010 p += emit_mov_addr (p, x3, tpaddr);
2011 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2012
2013 /* Save CPSR (NZCV), FPSR and FPCR:
2014
2015 MRS x2, nzcv
2016 MRS x1, fpsr
2017 MRS x0, fpcr
2018
2019 STR x2, [sp, #(2 * 16)]
2020 STR x1, [sp, #(1 * 16)]
2021 STR x0, [sp, #(0 * 16)]
2022
2023 */
2024 p += emit_mrs (p, x2, NZCV);
2025 p += emit_mrs (p, x1, FPSR);
2026 p += emit_mrs (p, x0, FPCR);
2027 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2028 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2029 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2030
2031 /* Push the collecting_t object. It consists of the address of the
2032 tracepoint and an ID for the current thread. We get the latter by
2033 reading the tpidr_el0 system register. It corresponds to the
2034 NT_ARM_TLS register accessible with ptrace.
2035
2036 MOV x0, #(tpoint)
2037 ...
2038
2039 MRS x1, tpidr_el0
2040
2041 STP x0, x1, [sp, #-16]!
2042
2043 */
2044
2045 p += emit_mov_addr (p, x0, tpoint);
2046 p += emit_mrs (p, x1, TPIDR_EL0);
2047 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
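/* Rough shape of the object just pushed, given here only as a sketch (an
   assumption; the authoritative definition lives with the tracepoint/IPA
   code):

     struct collecting_t { uintptr_t tpoint; uintptr_t thread_area; };

   The single pre-indexed STP stores both fields at once and leaves SP
   pointing at the object. */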
2048
2049 /* Spin-lock:
2050
2051 The shared memory for the lock is at lockaddr. It will hold zero
2052 if no-one is holding the lock, otherwise it contains the address of
2053 the collecting_t object on the stack of the thread which acquired it.
2054
2055 At this stage, the stack pointer points to this thread's collecting_t
2056 object.
2057
2058 We use the following registers:
2059 - x0: Address of the lock.
2060 - x1: Pointer to collecting_t object.
2061 - x2: Scratch register.
2062
2063 MOV x0, #(lockaddr)
2064 ...
2065 MOV x1, sp
2066
2067 ; Trigger an event local to this core, so that the first WFE
2068 ; below returns immediately instead of waiting.
2069 SEVL
2070 again:
2071 ; Wait for an event. The event is triggered by either the SEVL
2072 ; or STLR instructions (store release).
2073 WFE
2074
2075 ; Atomically read at lockaddr. This marks the memory location as
2076 ; exclusive. This load-acquire also imposes memory ordering: later
2077 ; reads and writes cannot be reordered to take place before the
2078 ; load itself.
2079 LDAXR x2, [x0]
2080
2081 ; Try again if another thread holds the lock.
2082 CBNZ x2, again
2083
2084 ; We can lock it! Write the address of the collecting_t object.
2085 ; This instruction will fail if the memory location is not marked
2086 ; as exclusive anymore. If it succeeds, it will remove the
2087 ; exclusive mark on the memory location. This way, if another
2088 ; thread executes this instruction before us, we will fail and try
2089 ; all over again.
2090 STXR w2, x1, [x0]
2091 CBNZ w2, again
2092
2093 */
2094
2095 p += emit_mov_addr (p, x0, lockaddr);
2096 p += emit_mov (p, x1, register_operand (sp));
2097
2098 p += emit_sevl (p);
2099 p += emit_wfe (p);
2100 p += emit_ldaxr (p, x2, x0);
2101 p += emit_cb (p, 1, w2, -2 * 4);
2102 p += emit_stxr (p, w2, x1, x0);
2103 p += emit_cb (p, 1, x2, -4 * 4);
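/* For intuition only, a C sketch of the LDAXR/STXR loop emitted above,
   assuming GCC atomic builtins; "lock" stands for lockaddr and "self" for
   the address of this thread's collecting_t object:

     uintptr_t expected = 0;
     while (!__atomic_compare_exchange_n (lock, &expected, self, 1,
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
       expected = 0;

   The releasing STLR emitted further down plays the part of the unlocking
   store of zero. */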
2104
2105 /* Call collector (struct tracepoint *, unsigned char *):
2106
2107 MOV x0, #(tpoint)
2108 ...
2109
2110 ; Saved registers start after the collecting_t object.
2111 ADD x1, sp, #16
2112
2113 ; We use an intra-procedure-call scratch register.
2114 MOV ip0, #(collector)
2115 ...
2116
2117 ; And call back to C!
2118 BLR ip0
2119
2120 */
2121
2122 p += emit_mov_addr (p, x0, tpoint);
2123 p += emit_add (p, x1, sp, immediate_operand (16));
2124
2125 p += emit_mov_addr (p, ip0, collector);
2126 p += emit_blr (p, ip0);
2127
2128 /* Release the lock.
2129
2130 MOV x0, #(lockaddr)
2131 ...
2132
2133 ; This instruction is a normal store with memory ordering
2134 ; constraints. Thanks to this we do not have to put a data
2135 ; barrier instruction to make sure all data reads and writes are done
2136 ; before this instruction is executed. Furthermore, this instruction
2137 ; will trigger an event, letting other threads know they can grab
2138 ; the lock.
2139 STLR xzr, [x0]
2140
2141 */
2142 p += emit_mov_addr (p, x0, lockaddr);
2143 p += emit_stlr (p, xzr, x0);
2144
2145 /* Free collecting_t object:
2146
2147 ADD sp, sp, #16
2148
2149 */
2150 p += emit_add (p, sp, sp, immediate_operand (16));
2151
2152 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2153 registers from the stack.
2154
2155 LDR x2, [sp, #(2 * 16)]
2156 LDR x1, [sp, #(1 * 16)]
2157 LDR x0, [sp, #(0 * 16)]
2158
2159 MSR NZCV, x2
2160 MSR FPSR, x1
2161 MSR FPCR, x0
2162
2163 ADD sp, sp, #(5 * 16)
2164
2165 */
2166 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2167 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2168 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2169 p += emit_msr (p, NZCV, x2);
2170 p += emit_msr (p, FPSR, x1);
2171 p += emit_msr (p, FPCR, x0);
2172
2173 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2174
2175 /* Pop general purpose registers:
2176
2177 LDR x0, [sp]
2178 ...
2179 LDR x30, [sp, #(30 * 16)]
2180
2181 ADD sp, sp, #(31 * 16)
2182
2183 */
2184 for (i = 0; i <= 30; i += 1)
2185 p += emit_ldr (p, aarch64_register (i, 1), sp,
2186 offset_memory_operand (i * 16));
2187 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2188
2189 /* Pop SIMD&FP registers:
2190
2191 LDP q0, q1, [sp]
2192 ...
2193 LDP q30, q31, [sp, #(30 * 16)]
2194
2195 ADD sp, sp, #(32 * 16)
2196
2197 */
2198 for (i = 0; i <= 30; i += 2)
2199 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2200 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2201
2202 /* Write the code into the inferior memory. */
2203 append_insns (&buildaddr, p - buf, buf);
2204
2205 /* Now emit the relocated instruction. */
2206 *adjusted_insn_addr = buildaddr;
2207 target_read_uint32 (tpaddr, &insn);
2208
2209 insn_data.base.insn_addr = tpaddr;
2210 insn_data.new_addr = buildaddr;
2211 insn_data.insn_ptr = buf;
2212
2213 aarch64_relocate_instruction (insn, &visitor,
2214 (struct aarch64_insn_data *) &insn_data);
2215
2216 /* We may not have been able to relocate the instruction. */
2217 if (insn_data.insn_ptr == buf)
2218 {
2219 sprintf (err,
2220 "E.Could not relocate instruction from %s to %s.",
2221 core_addr_to_string_nz (tpaddr),
2222 core_addr_to_string_nz (buildaddr));
2223 return 1;
2224 }
2225 else
2226 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2227 *adjusted_insn_addr_end = buildaddr;
2228
2229 /* Go back to the start of the buffer. */
2230 p = buf;
2231
2232 /* Emit a branch back from the jump pad. */
2233 offset = (tpaddr + orig_size - buildaddr);
2234 if (!can_encode_int32 (offset, 28))
2235 {
2236 sprintf (err,
2237 "E.Jump back from jump pad too far from tracepoint "
2238 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2239 offset);
2240 return 1;
2241 }
2242
2243 p += emit_b (p, 0, offset);
2244 append_insns (&buildaddr, p - buf, buf);
2245
2246 /* Give the caller a branch instruction into the jump pad. */
2247 offset = (*jump_entry - tpaddr);
2248 if (!can_encode_int32 (offset, 28))
2249 {
2250 sprintf (err,
2251 "E.Jump pad too far from tracepoint "
2252 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2253 offset);
2254 return 1;
2255 }
2256
2257 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2258 *jjump_pad_insn_size = 4;
2259
2260 /* Return the end address of our pad. */
2261 *jump_entry = buildaddr;
2262
2263 return 0;
2264 }
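/* Editorial illustration, not used by the code above: both range checks in
   aarch64_install_fast_tracepoint_jump_pad rely on the AArch64 B instruction
   encoding a signed 26-bit word offset, i.e. a 28-bit byte offset, which
   gives a reach of +/-128 MiB.  A generic check equivalent to
   can_encode_int32 (offset, 28) could look like this sketch.  */

static int __attribute__ ((unused))
editorial_fits_signed_bits (int64_t value, unsigned int bits)
{
  /* A value fits in BITS bits when it lies in [-2^(bits-1), 2^(bits-1)).  */
  int64_t limit = (int64_t) 1 << (bits - 1);

  return value >= -limit && value < limit;
}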
2265
2266 /* Helper function writing LEN instructions from START into
2267 current_insn_ptr. */
2268
2269 static void
2270 emit_ops_insns (const uint32_t *start, int len)
2271 {
2272 CORE_ADDR buildaddr = current_insn_ptr;
2273
2274 if (debug_threads)
2275 debug_printf ("Adding %d instructions at %s\n",
2276 len, paddress (buildaddr));
2277
2278 append_insns (&buildaddr, len, start);
2279 current_insn_ptr = buildaddr;
2280 }
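/* Editorial note: every emitter below follows the same pattern, mirrored
   here for orientation rather than adding anything new:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_... (p, ...);           // build a handful of instructions
     emit_ops_insns (buf, p - buf);    // copy them out, advance current_insn_ptr

   Throughout, x0 caches the top of the evaluation stack and x1 is a scratch
   register, as documented in aarch64_emit_prologue below. */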
2281
2282 /* Pop a register from the stack. */
2283
2284 static int
2285 emit_pop (uint32_t *buf, struct aarch64_register rt)
2286 {
2287 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2288 }
2289
2290 /* Push a register on the stack. */
2291
2292 static int
2293 emit_push (uint32_t *buf, struct aarch64_register rt)
2294 {
2295 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2296 }
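/* Editorial note: the evaluation stack uses 16-byte cells (the pre/post
   index of 16 above) even though only 8 bytes are stored, which keeps SP
   16-byte aligned at all times; AArch64 can trap on SP-relative accesses
   made with a misaligned SP.  For example:

     p += emit_push (p, x0);   // STR x0, [sp, #-16]!
     p += emit_pop (p, x0);    // LDR x0, [sp], #16
*/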
2297
2298 /* Implementation of emit_ops method "emit_prologue". */
2299
2300 static void
2301 aarch64_emit_prologue (void)
2302 {
2303 uint32_t buf[16];
2304 uint32_t *p = buf;
2305
2306 /* This function emits a prologue for the following function prototype:
2307
2308 enum eval_result_type f (unsigned char *regs,
2309 ULONGEST *value);
2310
2311 The first argument is a buffer of raw registers. The second
2312 argument is the result of evaluating the expression, which
2313 will be set to whatever is on top of the stack when the
2314 evaluation ends.
2315
2316 The stack set up by the prologue is as such:
2317
2318 High *------------------------------------------------------*
2319 | LR |
2320 | FP | <- FP
2321 | x1 (ULONGEST *value) |
2322 | x0 (unsigned char *regs) |
2323 Low *------------------------------------------------------*
2324
2325 As we are implementing a stack machine, each opcode can expand the
2326 stack so we never know how far we are from the data saved by this
2327 prologue. In order to be able to refer to value and regs later, we save
2328 the current stack pointer in the frame pointer. This way, it is not
2329 clobbered when calling C functions.
2330
2331 Finally, throughout every operation, we are using register x0 as the
2332 top of the stack, and x1 as a scratch register. */
2333
2334 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2335 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2336 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2337
2338 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2339
2340
2341 emit_ops_insns (buf, p - buf);
2342 }
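/* Editorial note: with FP set up as above, the prologue's arguments sit at
   fixed offsets that the other emitters rely on (see aarch64_emit_reg and
   aarch64_emit_epilogue):

     [fp - 2 * 8]  ->  saved x0, the "unsigned char *regs" buffer
     [fp - 1 * 8]  ->  saved x1, the "ULONGEST *value" output pointer
*/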
2343
2344 /* Implementation of emit_ops method "emit_epilogue". */
2345
2346 static void
2347 aarch64_emit_epilogue (void)
2348 {
2349 uint32_t buf[16];
2350 uint32_t *p = buf;
2351
2352 /* Store the result of the expression (x0) in *value. */
2353 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2354 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2355 p += emit_str (p, x0, x1, offset_memory_operand (0));
2356
2357 /* Restore the previous state. */
2358 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2359 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2360
2361 /* Return expr_eval_no_error. */
2362 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2363 p += emit_ret (p, lr);
2364
2365 emit_ops_insns (buf, p - buf);
2366 }
2367
2368 /* Implementation of emit_ops method "emit_add". */
2369
2370 static void
2371 aarch64_emit_add (void)
2372 {
2373 uint32_t buf[16];
2374 uint32_t *p = buf;
2375
2376 p += emit_pop (p, x1);
2377 p += emit_add (p, x0, x1, register_operand (x0));
2378
2379 emit_ops_insns (buf, p - buf);
2380 }
2381
2382 /* Implementation of emit_ops method "emit_sub". */
2383
2384 static void
2385 aarch64_emit_sub (void)
2386 {
2387 uint32_t buf[16];
2388 uint32_t *p = buf;
2389
2390 p += emit_pop (p, x1);
2391 p += emit_sub (p, x0, x1, register_operand (x0));
2392
2393 emit_ops_insns (buf, p - buf);
2394 }
2395
2396 /* Implementation of emit_ops method "emit_mul". */
2397
2398 static void
2399 aarch64_emit_mul (void)
2400 {
2401 uint32_t buf[16];
2402 uint32_t *p = buf;
2403
2404 p += emit_pop (p, x1);
2405 p += emit_mul (p, x0, x1, x0);
2406
2407 emit_ops_insns (buf, p - buf);
2408 }
2409
2410 /* Implementation of emit_ops method "emit_lsh". */
2411
2412 static void
2413 aarch64_emit_lsh (void)
2414 {
2415 uint32_t buf[16];
2416 uint32_t *p = buf;
2417
2418 p += emit_pop (p, x1);
2419 p += emit_lslv (p, x0, x1, x0);
2420
2421 emit_ops_insns (buf, p - buf);
2422 }
2423
2424 /* Implementation of emit_ops method "emit_rsh_signed". */
2425
2426 static void
2427 aarch64_emit_rsh_signed (void)
2428 {
2429 uint32_t buf[16];
2430 uint32_t *p = buf;
2431
2432 p += emit_pop (p, x1);
2433 p += emit_asrv (p, x0, x1, x0);
2434
2435 emit_ops_insns (buf, p - buf);
2436 }
2437
2438 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2439
2440 static void
2441 aarch64_emit_rsh_unsigned (void)
2442 {
2443 uint32_t buf[16];
2444 uint32_t *p = buf;
2445
2446 p += emit_pop (p, x1);
2447 p += emit_lsrv (p, x0, x1, x0);
2448
2449 emit_ops_insns (buf, p - buf);
2450 }
2451
2452 /* Implementation of emit_ops method "emit_ext". */
2453
2454 static void
2455 aarch64_emit_ext (int arg)
2456 {
2457 uint32_t buf[16];
2458 uint32_t *p = buf;
2459
2460 p += emit_sbfx (p, x0, x0, 0, arg);
2461
2462 emit_ops_insns (buf, p - buf);
2463 }
2464
2465 /* Implementation of emit_ops method "emit_log_not". */
2466
2467 static void
2468 aarch64_emit_log_not (void)
2469 {
2470 uint32_t buf[16];
2471 uint32_t *p = buf;
2472
2473 /* If the top of the stack is 0, replace it with 1. Else replace it with
2474 0. */
2475
2476 p += emit_cmp (p, x0, immediate_operand (0));
2477 p += emit_cset (p, x0, EQ);
2478
2479 emit_ops_insns (buf, p - buf);
2480 }
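/* Editorial note: the CMP/CSET pair above materializes the boolean without
   a branch, i.e. it computes x0 = (x0 == 0) ? 1 : 0 directly. */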
2481
2482 /* Implementation of emit_ops method "emit_bit_and". */
2483
2484 static void
2485 aarch64_emit_bit_and (void)
2486 {
2487 uint32_t buf[16];
2488 uint32_t *p = buf;
2489
2490 p += emit_pop (p, x1);
2491 p += emit_and (p, x0, x0, x1);
2492
2493 emit_ops_insns (buf, p - buf);
2494 }
2495
2496 /* Implementation of emit_ops method "emit_bit_or". */
2497
2498 static void
2499 aarch64_emit_bit_or (void)
2500 {
2501 uint32_t buf[16];
2502 uint32_t *p = buf;
2503
2504 p += emit_pop (p, x1);
2505 p += emit_orr (p, x0, x0, x1);
2506
2507 emit_ops_insns (buf, p - buf);
2508 }
2509
2510 /* Implementation of emit_ops method "emit_bit_xor". */
2511
2512 static void
2513 aarch64_emit_bit_xor (void)
2514 {
2515 uint32_t buf[16];
2516 uint32_t *p = buf;
2517
2518 p += emit_pop (p, x1);
2519 p += emit_eor (p, x0, x0, x1);
2520
2521 emit_ops_insns (buf, p - buf);
2522 }
2523
2524 /* Implementation of emit_ops method "emit_bit_not". */
2525
2526 static void
2527 aarch64_emit_bit_not (void)
2528 {
2529 uint32_t buf[16];
2530 uint32_t *p = buf;
2531
2532 p += emit_mvn (p, x0, x0);
2533
2534 emit_ops_insns (buf, p - buf);
2535 }
2536
2537 /* Implementation of emit_ops method "emit_equal". */
2538
2539 static void
2540 aarch64_emit_equal (void)
2541 {
2542 uint32_t buf[16];
2543 uint32_t *p = buf;
2544
2545 p += emit_pop (p, x1);
2546 p += emit_cmp (p, x0, register_operand (x1));
2547 p += emit_cset (p, x0, EQ);
2548
2549 emit_ops_insns (buf, p - buf);
2550 }
2551
2552 /* Implementation of emit_ops method "emit_less_signed". */
2553
2554 static void
2555 aarch64_emit_less_signed (void)
2556 {
2557 uint32_t buf[16];
2558 uint32_t *p = buf;
2559
2560 p += emit_pop (p, x1);
2561 p += emit_cmp (p, x1, register_operand (x0));
2562 p += emit_cset (p, x0, LT);
2563
2564 emit_ops_insns (buf, p - buf);
2565 }
2566
2567 /* Implementation of emit_ops method "emit_less_unsigned". */
2568
2569 static void
2570 aarch64_emit_less_unsigned (void)
2571 {
2572 uint32_t buf[16];
2573 uint32_t *p = buf;
2574
2575 p += emit_pop (p, x1);
2576 p += emit_cmp (p, x1, register_operand (x0));
2577 p += emit_cset (p, x0, LO);
2578
2579 emit_ops_insns (buf, p - buf);
2580 }
2581
2582 /* Implementation of emit_ops method "emit_ref". */
2583
2584 static void
2585 aarch64_emit_ref (int size)
2586 {
2587 uint32_t buf[16];
2588 uint32_t *p = buf;
2589
2590 switch (size)
2591 {
2592 case 1:
2593 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2594 break;
2595 case 2:
2596 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2597 break;
2598 case 4:
2599 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2600 break;
2601 case 8:
2602 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2603 break;
2604 default:
2605 /* Unknown size, bail on compilation. */
2606 emit_error = 1;
2607 break;
2608 }
2609
2610 emit_ops_insns (buf, p - buf);
2611 }
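/* Editorial note: emit_ref consumes the address held in x0 (the top of the
   stack) and replaces it with the loaded value.  The 1-, 2- and 4-byte
   cases load into w0, which zero-extends into x0, so narrower values show
   up unsigned unless the bytecode later sign-extends them via emit_ext. */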
2612
2613 /* Implementation of emit_ops method "emit_if_goto". */
2614
2615 static void
2616 aarch64_emit_if_goto (int *offset_p, int *size_p)
2617 {
2618 uint32_t buf[16];
2619 uint32_t *p = buf;
2620
2621 /* The Z flag is set or cleared here. */
2622 p += emit_cmp (p, x0, immediate_operand (0));
2623 /* This instruction must not change the Z flag. */
2624 p += emit_pop (p, x0);
2625 /* Branch over the next instruction if x0 == 0. */
2626 p += emit_bcond (p, EQ, 8);
2627
2628 /* The NOP instruction will be patched with an unconditional branch. */
2629 if (offset_p)
2630 *offset_p = (p - buf) * 4;
2631 if (size_p)
2632 *size_p = 4;
2633 p += emit_nop (p);
2634
2635 emit_ops_insns (buf, p - buf);
2636 }
2637
2638 /* Implementation of emit_ops method "emit_goto". */
2639
2640 static void
2641 aarch64_emit_goto (int *offset_p, int *size_p)
2642 {
2643 uint32_t buf[16];
2644 uint32_t *p = buf;
2645
2646 /* The NOP instruction will be patched with an unconditional branch. */
2647 if (offset_p)
2648 *offset_p = 0;
2649 if (size_p)
2650 *size_p = 4;
2651 p += emit_nop (p);
2652
2653 emit_ops_insns (buf, p - buf);
2654 }
2655
2656 /* Implementation of emit_ops method "write_goto_address". */
2657
2658 static void
2659 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2660 {
2661 uint32_t insn;
2662
2663 emit_b (&insn, 0, to - from);
2664 append_insns (&from, 1, &insn);
2665 }
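/* Usage sketch (editorial): the goto emitters above leave a NOP behind and
   report its position through *offset_p/*size_p; once the bytecode compiler
   knows the destination, it calls, e.g.:

     aarch64_write_goto_address (nop_address, target_address, 4);

   which overwrites the NOP in place with "B (target - nop)". */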
2666
2667 /* Implementation of emit_ops method "emit_const". */
2668
2669 static void
2670 aarch64_emit_const (LONGEST num)
2671 {
2672 uint32_t buf[16];
2673 uint32_t *p = buf;
2674
2675 p += emit_mov_addr (p, x0, num);
2676
2677 emit_ops_insns (buf, p - buf);
2678 }
2679
2680 /* Implementation of emit_ops method "emit_call". */
2681
2682 static void
2683 aarch64_emit_call (CORE_ADDR fn)
2684 {
2685 uint32_t buf[16];
2686 uint32_t *p = buf;
2687
2688 p += emit_mov_addr (p, ip0, fn);
2689 p += emit_blr (p, ip0);
2690
2691 emit_ops_insns (buf, p - buf);
2692 }
2693
2694 /* Implementation of emit_ops method "emit_reg". */
2695
2696 static void
2697 aarch64_emit_reg (int reg)
2698 {
2699 uint32_t buf[16];
2700 uint32_t *p = buf;
2701
2702 /* Set x0 to unsigned char *regs. */
2703 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2704 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2705 p += emit_mov (p, x1, immediate_operand (reg));
2706
2707 emit_ops_insns (buf, p - buf);
2708
2709 aarch64_emit_call (get_raw_reg_func_addr ());
2710 }
2711
2712 /* Implementation of emit_ops method "emit_pop". */
2713
2714 static void
2715 aarch64_emit_pop (void)
2716 {
2717 uint32_t buf[16];
2718 uint32_t *p = buf;
2719
2720 p += emit_pop (p, x0);
2721
2722 emit_ops_insns (buf, p - buf);
2723 }
2724
2725 /* Implementation of emit_ops method "emit_stack_flush". */
2726
2727 static void
2728 aarch64_emit_stack_flush (void)
2729 {
2730 uint32_t buf[16];
2731 uint32_t *p = buf;
2732
2733 p += emit_push (p, x0);
2734
2735 emit_ops_insns (buf, p - buf);
2736 }
2737
2738 /* Implementation of emit_ops method "emit_zero_ext". */
2739
2740 static void
2741 aarch64_emit_zero_ext (int arg)
2742 {
2743 uint32_t buf[16];
2744 uint32_t *p = buf;
2745
2746 p += emit_ubfx (p, x0, x0, 0, arg);
2747
2748 emit_ops_insns (buf, p - buf);
2749 }
2750
2751 /* Implementation of emit_ops method "emit_swap". */
2752
2753 static void
2754 aarch64_emit_swap (void)
2755 {
2756 uint32_t buf[16];
2757 uint32_t *p = buf;
2758
2759 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2760 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2761 p += emit_mov (p, x0, register_operand (x1));
2762
2763 emit_ops_insns (buf, p - buf);
2764 }
2765
2766 /* Implementation of emit_ops method "emit_stack_adjust". */
2767
2768 static void
2769 aarch64_emit_stack_adjust (int n)
2770 {
2771 /* This is not needed with our design. */
2772 uint32_t buf[16];
2773 uint32_t *p = buf;
2774
2775 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2776
2777 emit_ops_insns (buf, p - buf);
2778 }
2779
2780 /* Implementation of emit_ops method "emit_int_call_1". */
2781
2782 static void
2783 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2784 {
2785 uint32_t buf[16];
2786 uint32_t *p = buf;
2787
2788 p += emit_mov (p, x0, immediate_operand (arg1));
2789
2790 emit_ops_insns (buf, p - buf);
2791
2792 aarch64_emit_call (fn);
2793 }
2794
2795 /* Implementation of emit_ops method "emit_void_call_2". */
2796
2797 static void
2798 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2799 {
2800 uint32_t buf[16];
2801 uint32_t *p = buf;
2802
2803 /* Push x0 on the stack. */
2804 aarch64_emit_stack_flush ();
2805
2806 /* Set up the arguments for the function call:
2807
2808 x0: arg1
2809 x1: top of the stack
2810
2811 MOV x1, x0
2812 MOV x0, #arg1 */
2813
2814 p += emit_mov (p, x1, register_operand (x0));
2815 p += emit_mov (p, x0, immediate_operand (arg1));
2816
2817 emit_ops_insns (buf, p - buf);
2818
2819 aarch64_emit_call (fn);
2820
2821 /* Restore x0. */
2822 aarch64_emit_pop ();
2823 }
2824
2825 /* Implementation of emit_ops method "emit_eq_goto". */
2826
2827 static void
2828 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2829 {
2830 uint32_t buf[16];
2831 uint32_t *p = buf;
2832
2833 p += emit_pop (p, x1);
2834 p += emit_cmp (p, x1, register_operand (x0));
2835 /* Branch over the next instruction if x0 != x1. */
2836 p += emit_bcond (p, NE, 8);
2837 /* The NOP instruction will be patched with an unconditional branch. */
2838 if (offset_p)
2839 *offset_p = (p - buf) * 4;
2840 if (size_p)
2841 *size_p = 4;
2842 p += emit_nop (p);
2843
2844 emit_ops_insns (buf, p - buf);
2845 }
2846
2847 /* Implementation of emit_ops method "emit_ne_goto". */
2848
2849 static void
2850 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2851 {
2852 uint32_t buf[16];
2853 uint32_t *p = buf;
2854
2855 p += emit_pop (p, x1);
2856 p += emit_cmp (p, x1, register_operand (x0));
2857 /* Branch over the next instruction if x0 == x1. */
2858 p += emit_bcond (p, EQ, 8);
2859 /* The NOP instruction will be patched with an unconditional branch. */
2860 if (offset_p)
2861 *offset_p = (p - buf) * 4;
2862 if (size_p)
2863 *size_p = 4;
2864 p += emit_nop (p);
2865
2866 emit_ops_insns (buf, p - buf);
2867 }
2868
2869 /* Implementation of emit_ops method "emit_lt_goto". */
2870
2871 static void
2872 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2873 {
2874 uint32_t buf[16];
2875 uint32_t *p = buf;
2876
2877 p += emit_pop (p, x1);
2878 p += emit_cmp (p, x1, register_operand (x0));
2879 /* Branch over the next instruction if x1 >= x0. */
2880 p += emit_bcond (p, GE, 8);
2881 /* The NOP instruction will be patched with an unconditional branch. */
2882 if (offset_p)
2883 *offset_p = (p - buf) * 4;
2884 if (size_p)
2885 *size_p = 4;
2886 p += emit_nop (p);
2887
2888 emit_ops_insns (buf, p - buf);
2889 }
2890
2891 /* Implementation of emit_ops method "emit_le_goto". */
2892
2893 static void
2894 aarch64_emit_le_goto (int *offset_p, int *size_p)
2895 {
2896 uint32_t buf[16];
2897 uint32_t *p = buf;
2898
2899 p += emit_pop (p, x1);
2900 p += emit_cmp (p, x1, register_operand (x0));
2901 /* Branch over the next instruction if x1 > x0. */
2902 p += emit_bcond (p, GT, 8);
2903 /* The NOP instruction will be patched with an unconditional branch. */
2904 if (offset_p)
2905 *offset_p = (p - buf) * 4;
2906 if (size_p)
2907 *size_p = 4;
2908 p += emit_nop (p);
2909
2910 emit_ops_insns (buf, p - buf);
2911 }
2912
2913 /* Implementation of emit_ops method "emit_gt_goto". */
2914
2915 static void
2916 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2917 {
2918 uint32_t buf[16];
2919 uint32_t *p = buf;
2920
2921 p += emit_pop (p, x1);
2922 p += emit_cmp (p, x1, register_operand (x0));
2923 /* Branch over the next instruction if x1 <= x0. */
2924 p += emit_bcond (p, LE, 8);
2925 /* The NOP instruction will be patched with an unconditional branch. */
2926 if (offset_p)
2927 *offset_p = (p - buf) * 4;
2928 if (size_p)
2929 *size_p = 4;
2930 p += emit_nop (p);
2931
2932 emit_ops_insns (buf, p - buf);
2933 }
2934
2935 /* Implementation of emit_ops method "emit_ge_goto". */
2936
2937 static void
2938 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2939 {
2940 uint32_t buf[16];
2941 uint32_t *p = buf;
2942
2943 p += emit_pop (p, x1);
2944 p += emit_cmp (p, x1, register_operand (x0));
2945 /* Branch over the next instruction if x1 < x0. */
2946 p += emit_bcond (p, LT, 8);
2947 /* The NOP instruction will be patched with an unconditional branch. */
2948 if (offset_p)
2949 *offset_p = (p - buf) * 4;
2950 if (size_p)
2951 *size_p = 4;
2952 p += emit_nop (p);
2953
2954 emit_ops_insns (buf, p - buf);
2955 }
2956
2957 static struct emit_ops aarch64_emit_ops_impl =
2958 {
2959 aarch64_emit_prologue,
2960 aarch64_emit_epilogue,
2961 aarch64_emit_add,
2962 aarch64_emit_sub,
2963 aarch64_emit_mul,
2964 aarch64_emit_lsh,
2965 aarch64_emit_rsh_signed,
2966 aarch64_emit_rsh_unsigned,
2967 aarch64_emit_ext,
2968 aarch64_emit_log_not,
2969 aarch64_emit_bit_and,
2970 aarch64_emit_bit_or,
2971 aarch64_emit_bit_xor,
2972 aarch64_emit_bit_not,
2973 aarch64_emit_equal,
2974 aarch64_emit_less_signed,
2975 aarch64_emit_less_unsigned,
2976 aarch64_emit_ref,
2977 aarch64_emit_if_goto,
2978 aarch64_emit_goto,
2979 aarch64_write_goto_address,
2980 aarch64_emit_const,
2981 aarch64_emit_call,
2982 aarch64_emit_reg,
2983 aarch64_emit_pop,
2984 aarch64_emit_stack_flush,
2985 aarch64_emit_zero_ext,
2986 aarch64_emit_swap,
2987 aarch64_emit_stack_adjust,
2988 aarch64_emit_int_call_1,
2989 aarch64_emit_void_call_2,
2990 aarch64_emit_eq_goto,
2991 aarch64_emit_ne_goto,
2992 aarch64_emit_lt_goto,
2993 aarch64_emit_le_goto,
2994 aarch64_emit_gt_goto,
2995 aarch64_emit_ge_goto,
2996 };
2997
2998 /* Implementation of linux_target_ops method "emit_ops". */
2999
3000 static struct emit_ops *
3001 aarch64_emit_ops (void)
3002 {
3003 return &aarch64_emit_ops_impl;
3004 }
3005
3006 /* Implementation of linux_target_ops method
3007 "get_min_fast_tracepoint_insn_len". */
3008
3009 static int
3010 aarch64_get_min_fast_tracepoint_insn_len (void)
3011 {
3012 return 4;
3013 }
3014
3015 /* Implementation of linux_target_ops method "supports_range_stepping". */
3016
3017 static int
3018 aarch64_supports_range_stepping (void)
3019 {
3020 return 1;
3021 }
3022
3023 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3024
3025 static const gdb_byte *
3026 aarch64_sw_breakpoint_from_kind (int kind, int *size)
3027 {
3028 if (is_64bit_tdesc ())
3029 {
3030 *size = aarch64_breakpoint_len;
3031 return aarch64_breakpoint;
3032 }
3033 else
3034 return arm_sw_breakpoint_from_kind (kind, size);
3035 }
3036
3037 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3038
3039 static int
3040 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3041 {
3042 if (is_64bit_tdesc ())
3043 return aarch64_breakpoint_len;
3044 else
3045 return arm_breakpoint_kind_from_pc (pcptr);
3046 }
3047
3048 /* Implementation of the linux_target_ops method
3049 "breakpoint_kind_from_current_state". */
3050
3051 static int
3052 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3053 {
3054 if (is_64bit_tdesc ())
3055 return aarch64_breakpoint_len;
3056 else
3057 return arm_breakpoint_kind_from_current_state (pcptr);
3058 }
3059
3060 /* Support for hardware single step. */
3061
3062 static int
3063 aarch64_supports_hardware_single_step (void)
3064 {
3065 return 1;
3066 }
3067
3068 struct linux_target_ops the_low_target =
3069 {
3070 NULL, /* cannot_fetch_register */
3071 NULL, /* cannot_store_register */
3072 NULL, /* fetch_register */
3073 aarch64_get_pc,
3074 aarch64_set_pc,
3075 aarch64_breakpoint_kind_from_pc,
3076 aarch64_sw_breakpoint_from_kind,
3077 NULL, /* get_next_pcs */
3078 0, /* decr_pc_after_break */
3079 aarch64_breakpoint_at,
3080 aarch64_supports_z_point_type,
3081 aarch64_insert_point,
3082 aarch64_remove_point,
3083 aarch64_stopped_by_watchpoint,
3084 aarch64_stopped_data_address,
3085 NULL, /* collect_ptrace_register */
3086 NULL, /* supply_ptrace_register */
3087 aarch64_linux_siginfo_fixup,
3088 aarch64_linux_new_process,
3089 aarch64_linux_delete_process,
3090 aarch64_linux_new_thread,
3091 aarch64_linux_delete_thread,
3092 aarch64_linux_new_fork,
3093 aarch64_linux_prepare_to_resume,
3094 NULL, /* process_qsupported */
3095 aarch64_supports_tracepoints,
3096 aarch64_get_thread_area,
3097 aarch64_install_fast_tracepoint_jump_pad,
3098 aarch64_emit_ops,
3099 aarch64_get_min_fast_tracepoint_insn_len,
3100 aarch64_supports_range_stepping,
3101 aarch64_breakpoint_kind_from_current_state,
3102 aarch64_supports_hardware_single_step,
3103 aarch64_get_syscall_trapinfo,
3104 };
3105
3106 /* The linux target ops object. */
3107
3108 linux_process_target *the_linux_target = &the_aarch64_target;
3109
3110 void
3111 initialize_low_arch (void)
3112 {
3113 initialize_low_arch_aarch32 ();
3114
3115 initialize_regsets_info (&aarch64_regsets_info);
3116 initialize_regsets_info (&aarch64_sve_regsets_info);
3117 }