1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 const regs_info *get_regs_info () override;
59
60 protected:
61
62 void low_arch_setup () override;
63
64 bool low_cannot_fetch_register (int regno) override;
65
66 bool low_cannot_store_register (int regno) override;
67 };
68
69 /* The singleton target ops object. */
70
71 static aarch64_target the_aarch64_target;
72
73 bool
74 aarch64_target::low_cannot_fetch_register (int regno)
75 {
76 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
77 "is not implemented by the target");
78 }
79
80 bool
81 aarch64_target::low_cannot_store_register (int regno)
82 {
83 gdb_assert_not_reached ("linux target op low_cannot_store_register "
84 "is not implemented by the target");
85 }
86
87 /* Per-process arch-specific data we want to keep. */
88
89 struct arch_process_info
90 {
91 /* Hardware breakpoint/watchpoint data.
92 The reason for them to be per-process rather than per-thread is
93 due to the lack of information in the gdbserver environment;
94 gdbserver is not told whether a requested hardware
95 breakpoint/watchpoint is thread specific or not, so it has to set
96 each hw bp/wp for every thread in the current process. The
97 higher level bp/wp management in gdb will resume a thread if a hw
98 bp/wp trap is not expected for it. Since the hw bp/wp setting is
99 the same for each thread, it is reasonable for the data to live here.
100 */
101 struct aarch64_debug_reg_state debug_reg_state;
102 };
103
104 /* Return true if the size of register 0 is 8 bytes. */
105
106 static int
107 is_64bit_tdesc (void)
108 {
109 struct regcache *regcache = get_thread_regcache (current_thread, 0);
110
111 return register_size (regcache->tdesc, 0) == 8;
112 }
113
114 /* Return true if the regcache's target description contains the SVE feature. */
115
116 static bool
117 is_sve_tdesc (void)
118 {
119 struct regcache *regcache = get_thread_regcache (current_thread, 0);
120
121 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
122 }
123
124 static void
125 aarch64_fill_gregset (struct regcache *regcache, void *buf)
126 {
127 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
128 int i;
129
130 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
131 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
132 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
133 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
134 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
135 }
136
137 static void
138 aarch64_store_gregset (struct regcache *regcache, const void *buf)
139 {
140 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
141 int i;
142
143 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
144 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
145 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
146 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
147 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
148 }
149
150 static void
151 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
152 {
153 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
154 int i;
155
156 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
157 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
158 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
159 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
160 }
161
162 static void
163 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
164 {
165 const struct user_fpsimd_state *regset
166 = (const struct user_fpsimd_state *) buf;
167 int i;
168
169 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
170 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
171 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
172 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
173 }
174
175 /* Store the pauth registers to regcache. */
176
177 static void
178 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
179 {
180 uint64_t *pauth_regset = (uint64_t *) buf;
181 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
182
183 if (pauth_base == 0)
184 return;
185
186 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
187 &pauth_regset[0]);
188 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
189 &pauth_regset[1]);
190 }
191
192 /* Implementation of linux_target_ops method "get_pc". */
193
194 static CORE_ADDR
195 aarch64_get_pc (struct regcache *regcache)
196 {
197 if (register_size (regcache->tdesc, 0) == 8)
198 return linux_get_pc_64bit (regcache);
199 else
200 return linux_get_pc_32bit (regcache);
201 }
202
203 /* Implementation of linux_target_ops method "set_pc". */
204
205 static void
206 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
207 {
208 if (register_size (regcache->tdesc, 0) == 8)
209 linux_set_pc_64bit (regcache, pc);
210 else
211 linux_set_pc_32bit (regcache, pc);
212 }
213
214 #define aarch64_breakpoint_len 4
215
216 /* AArch64 BRK software debug mode instruction.
217 This instruction needs to match gdb/aarch64-tdep.c
218 (aarch64_default_breakpoint). */
219 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
220
221 /* Implementation of linux_target_ops method "breakpoint_at". */
222
223 static int
224 aarch64_breakpoint_at (CORE_ADDR where)
225 {
226 if (is_64bit_tdesc ())
227 {
228 gdb_byte insn[aarch64_breakpoint_len];
229
230 the_target->read_memory (where, (unsigned char *) &insn,
231 aarch64_breakpoint_len);
232 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
233 return 1;
234
235 return 0;
236 }
237 else
238 return arm_breakpoint_at (where);
239 }
240
241 static void
242 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
243 {
244 int i;
245
246 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
247 {
248 state->dr_addr_bp[i] = 0;
249 state->dr_ctrl_bp[i] = 0;
250 state->dr_ref_count_bp[i] = 0;
251 }
252
253 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
254 {
255 state->dr_addr_wp[i] = 0;
256 state->dr_ctrl_wp[i] = 0;
257 state->dr_ref_count_wp[i] = 0;
258 }
259 }
260
261 /* Return the pointer to the debug register state structure in the
262 current process' arch-specific data area. */
263
264 struct aarch64_debug_reg_state *
265 aarch64_get_debug_reg_state (pid_t pid)
266 {
267 struct process_info *proc = find_process_pid (pid);
268
269 return &proc->priv->arch_private->debug_reg_state;
270 }
271
272 /* Implementation of linux_target_ops method "supports_z_point_type". */
273
274 static int
275 aarch64_supports_z_point_type (char z_type)
276 {
277 switch (z_type)
278 {
279 case Z_PACKET_SW_BP:
280 case Z_PACKET_HW_BP:
281 case Z_PACKET_WRITE_WP:
282 case Z_PACKET_READ_WP:
283 case Z_PACKET_ACCESS_WP:
284 return 1;
285 default:
286 return 0;
287 }
288 }
289
290 /* Implementation of linux_target_ops method "insert_point".
291
292 It actually only records the info of the to-be-inserted bp/wp;
293 the actual insertion will happen when threads are resumed. */
294
295 static int
296 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
297 int len, struct raw_breakpoint *bp)
298 {
299 int ret;
300 enum target_hw_bp_type targ_type;
301 struct aarch64_debug_reg_state *state
302 = aarch64_get_debug_reg_state (pid_of (current_thread));
303
304 if (show_debug_regs)
305 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
306 (unsigned long) addr, len);
307
308 /* Determine the type from the raw breakpoint type. */
309 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
310
311 if (targ_type != hw_execute)
312 {
313 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
314 ret = aarch64_handle_watchpoint (targ_type, addr, len,
315 1 /* is_insert */, state);
316 else
317 ret = -1;
318 }
319 else
320 {
321 if (len == 3)
322 {
323 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
324 instruction. Set it to 2 to correctly encode the length bit
325 mask in the hardware breakpoint/watchpoint control register. */
326 len = 2;
327 }
328 ret = aarch64_handle_breakpoint (targ_type, addr, len,
329 1 /* is_insert */, state);
330 }
331
332 if (show_debug_regs)
333 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
334 targ_type);
335
336 return ret;
337 }
338
339 /* Implementation of linux_target_ops method "remove_point".
340
341 It actually only records the info of the to-be-removed bp/wp,
342 the actual removal will be done when threads are resumed. */
343
344 static int
345 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
346 int len, struct raw_breakpoint *bp)
347 {
348 int ret;
349 enum target_hw_bp_type targ_type;
350 struct aarch64_debug_reg_state *state
351 = aarch64_get_debug_reg_state (pid_of (current_thread));
352
353 if (show_debug_regs)
354 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
355 (unsigned long) addr, len);
356
357 /* Determine the type from the raw breakpoint type. */
358 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
359
360 /* Set up state pointers. */
361 if (targ_type != hw_execute)
362 ret =
363 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
364 state);
365 else
366 {
367 if (len == 3)
368 {
369 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
370 instruction. Set it to 2 to correctly encode the length bit
371 mask in the hardware breakpoint/watchpoint control register. */
372 len = 2;
373 }
374 ret = aarch64_handle_breakpoint (targ_type, addr, len,
375 0 /* is_insert */, state);
376 }
377
378 if (show_debug_regs)
379 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
380 targ_type);
381
382 return ret;
383 }
384
385 /* Implementation of linux_target_ops method "stopped_data_address". */
386
387 static CORE_ADDR
388 aarch64_stopped_data_address (void)
389 {
390 siginfo_t siginfo;
391 int pid, i;
392 struct aarch64_debug_reg_state *state;
393
394 pid = lwpid_of (current_thread);
395
396 /* Get the siginfo. */
397 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
398 return (CORE_ADDR) 0;
399
400 /* Need to be a hardware breakpoint/watchpoint trap. */
401 if (siginfo.si_signo != SIGTRAP
402 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
403 return (CORE_ADDR) 0;
404
405 /* Check if the address matches any watched address. */
406 state = aarch64_get_debug_reg_state (pid_of (current_thread));
407 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
408 {
409 const unsigned int offset
410 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
411 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
412 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
413 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
414 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
415 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
416
417 if (state->dr_ref_count_wp[i]
418 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
419 && addr_trap >= addr_watch_aligned
420 && addr_trap < addr_watch + len)
421 {
422 /* ADDR_TRAP reports the first address of the memory range
423 accessed by the CPU, regardless of what the watched memory
424 range was. Thus, a large CPU access that straddles
425 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
426 ADDR_TRAP that is lower than the
427 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
428
429 addr: | 4 | 5 | 6 | 7 | 8 |
430 |---- range watched ----|
431 |----------- range accessed ------------|
432
433 In this case, ADDR_TRAP will be 4.
434
435 To match a watchpoint known to GDB core, we must never
436 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
437 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
438 positive on kernels older than 4.10. See PR
439 external/20207. */
440 return addr_orig;
441 }
442 }
443
444 return (CORE_ADDR) 0;
445 }
446
447 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
448
449 static int
450 aarch64_stopped_by_watchpoint (void)
451 {
452 if (aarch64_stopped_data_address () != 0)
453 return 1;
454 else
455 return 0;
456 }
457
458 /* Fetch the thread-local storage pointer for libthread_db. */
459
460 ps_err_e
461 ps_get_thread_area (struct ps_prochandle *ph,
462 lwpid_t lwpid, int idx, void **base)
463 {
464 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
465 is_64bit_tdesc ());
466 }
467
468 /* Implementation of linux_target_ops method "siginfo_fixup". */
469
470 static int
471 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
472 {
473 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
474 if (!is_64bit_tdesc ())
475 {
476 if (direction == 0)
477 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
478 native);
479 else
480 aarch64_siginfo_from_compat_siginfo (native,
481 (struct compat_siginfo *) inf);
482
483 return 1;
484 }
485
486 return 0;
487 }
488
489 /* Implementation of linux_target_ops method "new_process". */
490
491 static struct arch_process_info *
492 aarch64_linux_new_process (void)
493 {
494 struct arch_process_info *info = XCNEW (struct arch_process_info);
495
496 aarch64_init_debug_reg_state (&info->debug_reg_state);
497
498 return info;
499 }
500
501 /* Implementation of linux_target_ops method "delete_process". */
502
503 static void
504 aarch64_linux_delete_process (struct arch_process_info *info)
505 {
506 xfree (info);
507 }
508
509 /* Implementation of linux_target_ops method "linux_new_fork". */
510
511 static void
512 aarch64_linux_new_fork (struct process_info *parent,
513 struct process_info *child)
514 {
515 /* These are allocated by linux_add_process. */
516 gdb_assert (parent->priv != NULL
517 && parent->priv->arch_private != NULL);
518 gdb_assert (child->priv != NULL
519 && child->priv->arch_private != NULL);
520
521 /* Linux kernels before the 2.6.33 commit
522 72f674d203cd230426437cdcf7dd6f681dad8b0d
523 inherit hardware debug registers from the parent
524 on fork/vfork/clone. Newer Linux kernels create such tasks with
525 zeroed debug registers.
526
527 GDB core assumes the child inherits the watchpoints/hw
528 breakpoints of the parent, and will remove them all from the
529 forked off process. Copy the debug register mirrors into the
530 new process so that all breakpoints and watchpoints can be
531 removed together. The debug register mirrors will be zeroed
532 in the end before detaching the forked off process, thus making
533 this compatible with older Linux kernels too. */
534
535 *child->priv->arch_private = *parent->priv->arch_private;
536 }
537
538 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
539 #define AARCH64_HWCAP_PACA (1 << 30)
540
541 /* Implementation of linux target ops method "low_arch_setup". */
542
543 void
544 aarch64_target::low_arch_setup ()
545 {
546 unsigned int machine;
547 int is_elf64;
548 int tid;
549
550 tid = lwpid_of (current_thread);
551
552 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
553
554 if (is_elf64)
555 {
556 uint64_t vq = aarch64_sve_get_vq (tid);
557 unsigned long hwcap = linux_get_hwcap (8);
558 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
559
560 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
561 }
562 else
563 current_process ()->tdesc = aarch32_linux_read_description ();
564
565 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
566 }
567
568 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
569
570 static void
571 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
572 {
573 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
574 }
575
576 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
577
578 static void
579 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
580 {
581 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
582 }
583
584 static struct regset_info aarch64_regsets[] =
585 {
586 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
587 sizeof (struct user_pt_regs), GENERAL_REGS,
588 aarch64_fill_gregset, aarch64_store_gregset },
589 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
590 sizeof (struct user_fpsimd_state), FP_REGS,
591 aarch64_fill_fpregset, aarch64_store_fpregset
592 },
593 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
594 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
595 NULL, aarch64_store_pauthregset },
596 NULL_REGSET
597 };
598
599 static struct regsets_info aarch64_regsets_info =
600 {
601 aarch64_regsets, /* regsets */
602 0, /* num_regsets */
603 NULL, /* disabled_regsets */
604 };
605
606 static struct regs_info regs_info_aarch64 =
607 {
608 NULL, /* regset_bitmap */
609 NULL, /* usrregs */
610 &aarch64_regsets_info,
611 };
612
613 static struct regset_info aarch64_sve_regsets[] =
614 {
615 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
616 sizeof (struct user_pt_regs), GENERAL_REGS,
617 aarch64_fill_gregset, aarch64_store_gregset },
618 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
619 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
620 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
621 },
622 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
623 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
624 NULL, aarch64_store_pauthregset },
625 NULL_REGSET
626 };
627
628 static struct regsets_info aarch64_sve_regsets_info =
629 {
630 aarch64_sve_regsets, /* regsets. */
631 0, /* num_regsets. */
632 NULL, /* disabled_regsets. */
633 };
634
635 static struct regs_info regs_info_aarch64_sve =
636 {
637 NULL, /* regset_bitmap. */
638 NULL, /* usrregs. */
639 &aarch64_sve_regsets_info,
640 };
641
642 /* Implementation of linux target ops method "get_regs_info". */
643
644 const regs_info *
645 aarch64_target::get_regs_info ()
646 {
647 if (!is_64bit_tdesc ())
648 return &regs_info_aarch32;
649
650 if (is_sve_tdesc ())
651 return &regs_info_aarch64_sve;
652
653 return &regs_info_aarch64;
654 }
655
656 /* Implementation of linux_target_ops method "supports_tracepoints". */
657
658 static int
659 aarch64_supports_tracepoints (void)
660 {
661 if (current_thread == NULL)
662 return 1;
663 else
664 {
665 /* We don't support tracepoints on aarch32 now. */
666 return is_64bit_tdesc ();
667 }
668 }
669
670 /* Implementation of linux_target_ops method "get_thread_area". */
671
672 static int
673 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
674 {
675 struct iovec iovec;
676 uint64_t reg;
677
678 iovec.iov_base = &reg;
679 iovec.iov_len = sizeof (reg);
680
681 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
682 return -1;
683
684 *addrp = reg;
685
686 return 0;
687 }
688
689 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
690
691 static void
692 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
693 {
694 int use_64bit = register_size (regcache->tdesc, 0) == 8;
695
696 if (use_64bit)
697 {
698 long l_sysno;
699
700 collect_register_by_name (regcache, "x8", &l_sysno);
701 *sysno = (int) l_sysno;
702 }
703 else
704 collect_register_by_name (regcache, "r7", sysno);
705 }
706
707 /* List of condition codes that we need. */
708
709 enum aarch64_condition_codes
710 {
711 EQ = 0x0,
712 NE = 0x1,
713 LO = 0x3,
714 GE = 0xa,
715 LT = 0xb,
716 GT = 0xc,
717 LE = 0xd,
718 };
719
720 enum aarch64_operand_type
721 {
722 OPERAND_IMMEDIATE,
723 OPERAND_REGISTER,
724 };
725
726 /* Representation of an operand. At this time, it only supports register
727 and immediate types. */
728
729 struct aarch64_operand
730 {
731 /* Type of the operand. */
732 enum aarch64_operand_type type;
733
734 /* Value of the operand according to the type. */
735 union
736 {
737 uint32_t imm;
738 struct aarch64_register reg;
739 };
740 };
741
742 /* List of registers that we are currently using; we can add more here as
743 we need to use them. */
744
745 /* General purpose scratch registers (64 bit). */
746 static const struct aarch64_register x0 = { 0, 1 };
747 static const struct aarch64_register x1 = { 1, 1 };
748 static const struct aarch64_register x2 = { 2, 1 };
749 static const struct aarch64_register x3 = { 3, 1 };
750 static const struct aarch64_register x4 = { 4, 1 };
751
752 /* General purpose scratch registers (32 bit). */
753 static const struct aarch64_register w0 = { 0, 0 };
754 static const struct aarch64_register w2 = { 2, 0 };
755
756 /* Intra-procedure-call scratch registers. */
757 static const struct aarch64_register ip0 = { 16, 1 };
758
759 /* Special purpose registers. */
760 static const struct aarch64_register fp = { 29, 1 };
761 static const struct aarch64_register lr = { 30, 1 };
762 static const struct aarch64_register sp = { 31, 1 };
763 static const struct aarch64_register xzr = { 31, 1 };
764
765 /* Dynamically allocate a new register. If we know the register
766 statically, we should make it a global as above instead of using this
767 helper function. */
768
769 static struct aarch64_register
770 aarch64_register (unsigned num, int is64)
771 {
772 return (struct aarch64_register) { num, is64 };
773 }
774
775 /* Helper function to create a register operand, for instructions with
776 different types of operands.
777
778 For example:
779 p += emit_mov (p, x0, register_operand (x1)); */
780
781 static struct aarch64_operand
782 register_operand (struct aarch64_register reg)
783 {
784 struct aarch64_operand operand;
785
786 operand.type = OPERAND_REGISTER;
787 operand.reg = reg;
788
789 return operand;
790 }
791
792 /* Helper function to create an immediate operand, for instructions with
793 different types of operands.
794
795 For example:
796 p += emit_mov (p, x0, immediate_operand (12)); */
797
798 static struct aarch64_operand
799 immediate_operand (uint32_t imm)
800 {
801 struct aarch64_operand operand;
802
803 operand.type = OPERAND_IMMEDIATE;
804 operand.imm = imm;
805
806 return operand;
807 }
808
809 /* Helper function to create an offset memory operand.
810
811 For example:
812 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
813
814 static struct aarch64_memory_operand
815 offset_memory_operand (int32_t offset)
816 {
817 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
818 }
819
820 /* Helper function to create a pre-index memory operand.
821
822 For example:
823 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
824
825 static struct aarch64_memory_operand
826 preindex_memory_operand (int32_t index)
827 {
828 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
829 }
830
831 /* Helper function to create a post-index memory operand.
832
833 For example:
834 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
835
836 static struct aarch64_memory_operand
837 postindex_memory_operand (int32_t index)
838 {
839 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
840 }
841
842 /* System control registers. These special registers can be written and
843 read with the MRS and MSR instructions.
844
845 - NZCV: Condition flags. GDB refers to this register under the CPSR
846 name.
847 - FPSR: Floating-point status register.
848 - FPCR: Floating-point control register.
849 - TPIDR_EL0: Software thread ID register. */
850
851 enum aarch64_system_control_registers
852 {
853 /* op0 op1 crn crm op2 */
854 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
855 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
856 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
857 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
858 };
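
/* As a worked example of the packing above (illustrative only): NZCV is
   (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0 = 0x5a10.
   emit_mrs below then inserts this 15-bit value into the instruction with
   ENCODE (system_reg, 15, 5), i.e. at bits 5..19 of the MRS encoding.  */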
859
860 /* Write a BLR instruction into *BUF.
861
862 BLR rn
863
864 RN is the register to branch to. */
865
866 static int
867 emit_blr (uint32_t *buf, struct aarch64_register rn)
868 {
869 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
870 }
871
872 /* Write a RET instruction into *BUF.
873
874 RET xn
875
876 RN is the register to branch to. */
877
878 static int
879 emit_ret (uint32_t *buf, struct aarch64_register rn)
880 {
881 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
882 }
883
884 static int
885 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
886 struct aarch64_register rt,
887 struct aarch64_register rt2,
888 struct aarch64_register rn,
889 struct aarch64_memory_operand operand)
890 {
891 uint32_t opc;
892 uint32_t pre_index;
893 uint32_t write_back;
894
895 if (rt.is64)
896 opc = ENCODE (2, 2, 30);
897 else
898 opc = ENCODE (0, 2, 30);
899
900 switch (operand.type)
901 {
902 case MEMORY_OPERAND_OFFSET:
903 {
904 pre_index = ENCODE (1, 1, 24);
905 write_back = ENCODE (0, 1, 23);
906 break;
907 }
908 case MEMORY_OPERAND_POSTINDEX:
909 {
910 pre_index = ENCODE (0, 1, 24);
911 write_back = ENCODE (1, 1, 23);
912 break;
913 }
914 case MEMORY_OPERAND_PREINDEX:
915 {
916 pre_index = ENCODE (1, 1, 24);
917 write_back = ENCODE (1, 1, 23);
918 break;
919 }
920 default:
921 return 0;
922 }
923
924 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
925 | ENCODE (operand.index >> 3, 7, 15)
926 | ENCODE (rt2.num, 5, 10)
927 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
928 }
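
/* A note on the offset encoding above (assuming the standard LDP/STP
   immediate form): ENCODE (operand.index >> 3, 7, 15) stores the byte
   offset divided by 8 in a signed 7-bit field, so the representable
   offsets are -64 * 8 = -512 up to 63 * 8 = 504, matching the ranges
   documented for emit_stp and emit_ldp below.  */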
929
930 /* Write a STP instruction into *BUF.
931
932 STP rt, rt2, [rn, #offset]
933 STP rt, rt2, [rn, #index]!
934 STP rt, rt2, [rn], #index
935
936 RT and RT2 are the registers to store.
937 RN is the base address register.
938 OFFSET is the immediate to add to the base address. It is limited to a
939 -512 .. 504 range (7 bits << 3). */
940
941 static int
942 emit_stp (uint32_t *buf, struct aarch64_register rt,
943 struct aarch64_register rt2, struct aarch64_register rn,
944 struct aarch64_memory_operand operand)
945 {
946 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
947 }
948
949 /* Write a LDP instruction into *BUF.
950
951 LDP rt, rt2, [rn, #offset]
952 LDP rt, rt2, [rn, #index]!
953 LDP rt, rt2, [rn], #index
954
955 RT and RT2 are the registers to load.
956 RN is the base address register.
957 OFFSET is the immediate to add to the base address. It is limited to a
958 -512 .. 504 range (7 bits << 3). */
959
960 static int
961 emit_ldp (uint32_t *buf, struct aarch64_register rt,
962 struct aarch64_register rt2, struct aarch64_register rn,
963 struct aarch64_memory_operand operand)
964 {
965 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
966 }
967
968 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
969
970 LDP qt, qt2, [rn, #offset]
971
972 RT and RT2 are the Q registers to load.
973 RN is the base address register.
974 OFFSET is the immediate to add to the base address. It is limited to
975 -1024 .. 1008 range (7 bits << 4). */
976
977 static int
978 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
979 struct aarch64_register rn, int32_t offset)
980 {
981 uint32_t opc = ENCODE (2, 2, 30);
982 uint32_t pre_index = ENCODE (1, 1, 24);
983
984 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
985 | ENCODE (offset >> 4, 7, 15)
986 | ENCODE (rt2, 5, 10)
987 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
988 }
989
990 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
991
992 STP qt, qt2, [rn, #offset]
993
994 RT and RT2 are the Q registers to store.
995 RN is the base address register.
996 OFFSET is the immediate to add to the base address. It is limited to
997 -1024 .. 1008 range (7 bits << 4). */
998
999 static int
1000 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1001 struct aarch64_register rn, int32_t offset)
1002 {
1003 uint32_t opc = ENCODE (2, 2, 30);
1004 uint32_t pre_index = ENCODE (1, 1, 24);
1005
1006 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
1007 | ENCODE (offset >> 4, 7, 15)
1008 | ENCODE (rt2, 5, 10)
1009 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1010 }
1011
1012 /* Write a LDRH instruction into *BUF.
1013
1014 LDRH wt, [xn, #offset]
1015 LDRH wt, [xn, #index]!
1016 LDRH wt, [xn], #index
1017
1018 RT is the register to load into.
1019 RN is the base address register.
1020 OFFSET is the immediate to add to the base address. It is limited to
1021 0 .. 32760 range (12 bits << 3). */
1022
1023 static int
1024 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1025 struct aarch64_register rn,
1026 struct aarch64_memory_operand operand)
1027 {
1028 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1029 }
1030
1031 /* Write a LDRB instruction into *BUF.
1032
1033 LDRB wt, [xn, #offset]
1034 LDRB wt, [xn, #index]!
1035 LDRB wt, [xn], #index
1036
1037 RT is the register to load into.
1038 RN is the base address register.
1039 OFFSET is the immediate to add to the base address. It is limited to
1040 0 .. 32760 range (12 bits << 3). */
1041
1042 static int
1043 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1044 struct aarch64_register rn,
1045 struct aarch64_memory_operand operand)
1046 {
1047 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1048 }
1049
1050
1051
1052 /* Write a STR instruction into *BUF.
1053
1054 STR rt, [rn, #offset]
1055 STR rt, [rn, #index]!
1056 STR rt, [rn], #index
1057
1058 RT is the register to store.
1059 RN is the base address register.
1060 OFFSET is the immediate to add to the base address. It is limited to
1061 0 .. 32760 range (12 bits << 3). */
1062
1063 static int
1064 emit_str (uint32_t *buf, struct aarch64_register rt,
1065 struct aarch64_register rn,
1066 struct aarch64_memory_operand operand)
1067 {
1068 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1069 }
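
/* A note on the documented range (assuming the unsigned-offset STR form
   with a 64-bit RT): the 12-bit immediate is scaled by the 8-byte access
   size, so the maximum encodable offset is 4095 * 8 = 32760.  */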
1070
1071 /* Helper function emitting an exclusive load or store instruction. */
1072
1073 static int
1074 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1075 enum aarch64_opcodes opcode,
1076 struct aarch64_register rs,
1077 struct aarch64_register rt,
1078 struct aarch64_register rt2,
1079 struct aarch64_register rn)
1080 {
1081 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1082 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1083 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1084 }
1085
1086 /* Write a LDAXR instruction into *BUF.
1087
1088 LDAXR rt, [xn]
1089
1090 RT is the destination register.
1091 RN is the base address register. */
1092
1093 static int
1094 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1095 struct aarch64_register rn)
1096 {
1097 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1098 xzr, rn);
1099 }
1100
1101 /* Write a STXR instruction into *BUF.
1102
1103 STXR ws, rt, [xn]
1104
1105 RS is the result register; it indicates if the store succeeded or not.
1106 RT is the register to store.
1107 RN is the base address register. */
1108
1109 static int
1110 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1111 struct aarch64_register rt, struct aarch64_register rn)
1112 {
1113 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1114 xzr, rn);
1115 }
1116
1117 /* Write a STLR instruction into *BUF.
1118
1119 STLR rt, [xn]
1120
1121 RT is the register to store.
1122 RN is the base address register. */
1123
1124 static int
1125 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1126 struct aarch64_register rn)
1127 {
1128 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1129 xzr, rn);
1130 }
1131
1132 /* Helper function for data processing instructions with register sources. */
1133
1134 static int
1135 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1136 struct aarch64_register rd,
1137 struct aarch64_register rn,
1138 struct aarch64_register rm)
1139 {
1140 uint32_t size = ENCODE (rd.is64, 1, 31);
1141
1142 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1143 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1144 }
1145
1146 /* Helper function for data processing instructions taking either a register
1147 or an immediate. */
1148
1149 static int
1150 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1151 struct aarch64_register rd,
1152 struct aarch64_register rn,
1153 struct aarch64_operand operand)
1154 {
1155 uint32_t size = ENCODE (rd.is64, 1, 31);
1156 /* The opcode is different for register and immediate source operands. */
1157 uint32_t operand_opcode;
1158
1159 if (operand.type == OPERAND_IMMEDIATE)
1160 {
1161 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1162 operand_opcode = ENCODE (8, 4, 25);
1163
1164 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1165 | ENCODE (operand.imm, 12, 10)
1166 | ENCODE (rn.num, 5, 5)
1167 | ENCODE (rd.num, 5, 0));
1168 }
1169 else
1170 {
1171 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1172 operand_opcode = ENCODE (5, 4, 25);
1173
1174 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1175 rn, operand.reg);
1176 }
1177 }
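
/* To connect the bit patterns in the comments above with the ENCODE calls
   (illustrative): ENCODE (8, 4, 25) places 0b1000 at bits 25..28, giving
   the "xxx1 000x ..." immediate-form prefix, while ENCODE (5, 4, 25)
   places 0b0101 there, giving the "xxx0 101x ..." register-form prefix.  */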
1178
1179 /* Write an ADD instruction into *BUF.
1180
1181 ADD rd, rn, #imm
1182 ADD rd, rn, rm
1183
1184 This function handles both an immediate and register add.
1185
1186 RD is the destination register.
1187 RN is the input register.
1188 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1189 OPERAND_REGISTER. */
1190
1191 static int
1192 emit_add (uint32_t *buf, struct aarch64_register rd,
1193 struct aarch64_register rn, struct aarch64_operand operand)
1194 {
1195 return emit_data_processing (buf, ADD, rd, rn, operand);
1196 }
1197
1198 /* Write a SUB instruction into *BUF.
1199
1200 SUB rd, rn, #imm
1201 SUB rd, rn, rm
1202
1203 This function handles both an immediate and register sub.
1204
1205 RD is the destination register.
1206 RN is the input register.
1207 OPERAND is the source operand to subtract from RN. */
1208
1209 static int
1210 emit_sub (uint32_t *buf, struct aarch64_register rd,
1211 struct aarch64_register rn, struct aarch64_operand operand)
1212 {
1213 return emit_data_processing (buf, SUB, rd, rn, operand);
1214 }
1215
1216 /* Write a MOV instruction into *BUF.
1217
1218 MOV rd, #imm
1219 MOV rd, rm
1220
1221 This function handles both a wide immediate move and a register move,
1222 with the condition that the source register is not xzr. xzr and the
1223 stack pointer share the same encoding and this function only supports
1224 the stack pointer.
1225
1226 RD is the destination register.
1227 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1228 OPERAND_REGISTER. */
1229
1230 static int
1231 emit_mov (uint32_t *buf, struct aarch64_register rd,
1232 struct aarch64_operand operand)
1233 {
1234 if (operand.type == OPERAND_IMMEDIATE)
1235 {
1236 uint32_t size = ENCODE (rd.is64, 1, 31);
1237 /* Do not shift the immediate. */
1238 uint32_t shift = ENCODE (0, 2, 21);
1239
1240 return aarch64_emit_insn (buf, MOV | size | shift
1241 | ENCODE (operand.imm, 16, 5)
1242 | ENCODE (rd.num, 5, 0));
1243 }
1244 else
1245 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1246 }
1247
1248 /* Write a MOVK instruction into *BUF.
1249
1250 MOVK rd, #imm, lsl #shift
1251
1252 RD is the destination register.
1253 IMM is the immediate.
1254 SHIFT is the logical shift left to apply to IMM. */
1255
1256 static int
1257 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1258 unsigned shift)
1259 {
1260 uint32_t size = ENCODE (rd.is64, 1, 31);
1261
1262 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1263 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1264 }
1265
1266 /* Write instructions into *BUF in order to move ADDR into a register.
1267 ADDR can be a 64-bit value.
1268
1269 This function will emit a series of MOV and MOVK instructions, such as:
1270
1271 MOV xd, #(addr)
1272 MOVK xd, #(addr >> 16), lsl #16
1273 MOVK xd, #(addr >> 32), lsl #32
1274 MOVK xd, #(addr >> 48), lsl #48 */
1275
1276 static int
1277 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1278 {
1279 uint32_t *p = buf;
1280
1281 /* The MOV (wide immediate) instruction clears the top bits of the
1282 register. */
1283 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1284
1285 if ((addr >> 16) != 0)
1286 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1287 else
1288 return p - buf;
1289
1290 if ((addr >> 32) != 0)
1291 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1292 else
1293 return p - buf;
1294
1295 if ((addr >> 48) != 0)
1296 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1297
1298 return p - buf;
1299 }
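
/* For example (illustrative only), emit_mov_addr for the address
   0x0000ffffbeef0123 would produce three instructions:

     MOV  xd, #0x0123
     MOVK xd, #0xbeef, lsl #16
     MOVK xd, #0xffff, lsl #32

   The final MOVK for bits 48..63 is skipped because they are zero.  */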
1300
1301 /* Write a SUBS instruction into *BUF.
1302
1303 SUBS rd, rn, rm
1304
1305 This instruction updates the condition flags.
1306
1307 RD is the destination register.
1308 RN and RM are the source registers. */
1309
1310 static int
1311 emit_subs (uint32_t *buf, struct aarch64_register rd,
1312 struct aarch64_register rn, struct aarch64_operand operand)
1313 {
1314 return emit_data_processing (buf, SUBS, rd, rn, operand);
1315 }
1316
1317 /* Write a CMP instruction into *BUF.
1318
1319 CMP rn, rm
1320
1321 This instruction is an alias of SUBS xzr, rn, rm.
1322
1323 RN and RM are the registers to compare. */
1324
1325 static int
1326 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1327 struct aarch64_operand operand)
1328 {
1329 return emit_subs (buf, xzr, rn, operand);
1330 }
1331
1332 /* Write a AND instruction into *BUF.
1333
1334 AND rd, rn, rm
1335
1336 RD is the destination register.
1337 RN and RM are the source registers. */
1338
1339 static int
1340 emit_and (uint32_t *buf, struct aarch64_register rd,
1341 struct aarch64_register rn, struct aarch64_register rm)
1342 {
1343 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1344 }
1345
1346 /* Write a ORR instruction into *BUF.
1347
1348 ORR rd, rn, rm
1349
1350 RD is the destination register.
1351 RN and RM are the source registers. */
1352
1353 static int
1354 emit_orr (uint32_t *buf, struct aarch64_register rd,
1355 struct aarch64_register rn, struct aarch64_register rm)
1356 {
1357 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1358 }
1359
1360 /* Write a ORN instruction into *BUF.
1361
1362 ORN rd, rn, rm
1363
1364 RD is the destination register.
1365 RN and RM are the source registers. */
1366
1367 static int
1368 emit_orn (uint32_t *buf, struct aarch64_register rd,
1369 struct aarch64_register rn, struct aarch64_register rm)
1370 {
1371 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1372 }
1373
1374 /* Write a EOR instruction into *BUF.
1375
1376 EOR rd, rn, rm
1377
1378 RD is the destination register.
1379 RN and RM are the source registers. */
1380
1381 static int
1382 emit_eor (uint32_t *buf, struct aarch64_register rd,
1383 struct aarch64_register rn, struct aarch64_register rm)
1384 {
1385 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1386 }
1387
1388 /* Write a MVN instruction into *BUF.
1389
1390 MVN rd, rm
1391
1392 This is an alias for ORN rd, xzr, rm.
1393
1394 RD is the destination register.
1395 RM is the source register. */
1396
1397 static int
1398 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1399 struct aarch64_register rm)
1400 {
1401 return emit_orn (buf, rd, xzr, rm);
1402 }
1403
1404 /* Write a LSLV instruction into *BUF.
1405
1406 LSLV rd, rn, rm
1407
1408 RD is the destination register.
1409 RN and RM are the source registers. */
1410
1411 static int
1412 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1413 struct aarch64_register rn, struct aarch64_register rm)
1414 {
1415 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1416 }
1417
1418 /* Write a LSRV instruction into *BUF.
1419
1420 LSRV rd, rn, rm
1421
1422 RD is the destination register.
1423 RN and RM are the source registers. */
1424
1425 static int
1426 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1427 struct aarch64_register rn, struct aarch64_register rm)
1428 {
1429 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1430 }
1431
1432 /* Write a ASRV instruction into *BUF.
1433
1434 ASRV rd, rn, rm
1435
1436 RD is the destination register.
1437 RN and RM are the source registers. */
1438
1439 static int
1440 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1441 struct aarch64_register rn, struct aarch64_register rm)
1442 {
1443 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1444 }
1445
1446 /* Write a MUL instruction into *BUF.
1447
1448 MUL rd, rn, rm
1449
1450 RD is the destination register.
1451 RN and RM are the source registers. */
1452
1453 static int
1454 emit_mul (uint32_t *buf, struct aarch64_register rd,
1455 struct aarch64_register rn, struct aarch64_register rm)
1456 {
1457 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1458 }
1459
1460 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1461
1462 MRS xt, system_reg
1463
1464 RT is the destination register.
1465 SYSTEM_REG is the special purpose register to read. */
1466
1467 static int
1468 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1469 enum aarch64_system_control_registers system_reg)
1470 {
1471 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1472 | ENCODE (rt.num, 5, 0));
1473 }
1474
1475 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1476
1477 MSR system_reg, xt
1478
1479 SYSTEM_REG is the special purpose register to write.
1480 RT is the input register. */
1481
1482 static int
1483 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1484 struct aarch64_register rt)
1485 {
1486 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1487 | ENCODE (rt.num, 5, 0));
1488 }
1489
1490 /* Write a SEVL instruction into *BUF.
1491
1492 This is a hint instruction telling the hardware to trigger an event. */
1493
1494 static int
1495 emit_sevl (uint32_t *buf)
1496 {
1497 return aarch64_emit_insn (buf, SEVL);
1498 }
1499
1500 /* Write a WFE instruction into *BUF.
1501
1502 This is a hint instruction telling the hardware to wait for an event. */
1503
1504 static int
1505 emit_wfe (uint32_t *buf)
1506 {
1507 return aarch64_emit_insn (buf, WFE);
1508 }
1509
1510 /* Write a SBFM instruction into *BUF.
1511
1512 SBFM rd, rn, #immr, #imms
1513
1514 This instruction moves the bits from #immr to #imms into the
1515 destination, sign extending the result.
1516
1517 RD is the destination register.
1518 RN is the source register.
1519 IMMR is the bit number to start at (least significant bit).
1520 IMMS is the bit number to stop at (most significant bit). */
1521
1522 static int
1523 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1524 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1525 {
1526 uint32_t size = ENCODE (rd.is64, 1, 31);
1527 uint32_t n = ENCODE (rd.is64, 1, 22);
1528
1529 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1530 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1531 | ENCODE (rd.num, 5, 0));
1532 }
1533
1534 /* Write a SBFX instruction into *BUF.
1535
1536 SBFX rd, rn, #lsb, #width
1537
1538 This instruction moves #width bits from #lsb into the destination, sign
1539 extending the result. This is an alias for:
1540
1541 SBFM rd, rn, #lsb, #(lsb + width - 1)
1542
1543 RD is the destination register.
1544 RN is the source register.
1545 LSB is the bit number to start at (least significant bit).
1546 WIDTH is the number of bits to move. */
1547
1548 static int
1549 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1550 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1551 {
1552 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1553 }
1554
1555 /* Write a UBFM instruction into *BUF.
1556
1557 UBFM rd, rn, #immr, #imms
1558
1559 This instruction moves the bits from #immr to #imms into the
1560 destination, extending the result with zeros.
1561
1562 RD is the destination register.
1563 RN is the source register.
1564 IMMR is the bit number to start at (least significant bit).
1565 IMMS is the bit number to stop at (most significant bit). */
1566
1567 static int
1568 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1569 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1570 {
1571 uint32_t size = ENCODE (rd.is64, 1, 31);
1572 uint32_t n = ENCODE (rd.is64, 1, 22);
1573
1574 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1575 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1576 | ENCODE (rd.num, 5, 0));
1577 }
1578
1579 /* Write a UBFX instruction into *BUF.
1580
1581 UBFX rd, rn, #lsb, #width
1582
1583 This instruction moves #width bits from #lsb into the destination,
1584 extending the result with zeros. This is an alias for:
1585
1586 UBFM rd, rn, #lsb, #(lsb + width - 1)
1587
1588 RD is the destination register.
1589 RN is the source register.
1590 LSB is the bit number to start at (least significant bit).
1591 WIDTH is the number of bits to move. */
1592
1593 static int
1594 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1595 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1596 {
1597 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1598 }
1599
1600 /* Write a CSINC instruction into *BUF.
1601
1602 CSINC rd, rn, rm, cond
1603
1604 This instruction writes rn to rd if the condition is true, and rm + 1
1605 to rd otherwise.
1606
1607 RD is the destination register.
1608 RN and RM are the source registers.
1609 COND is the encoded condition. */
1610
1611 static int
1612 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1613 struct aarch64_register rn, struct aarch64_register rm,
1614 unsigned cond)
1615 {
1616 uint32_t size = ENCODE (rd.is64, 1, 31);
1617
1618 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1619 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1620 | ENCODE (rd.num, 5, 0));
1621 }
1622
1623 /* Write a CSET instruction into *BUF.
1624
1625 CSET rd, cond
1626
1627 This instruction conditionally writes 1 or 0 in the destination register.
1628 1 is written if the condition is true. This is an alias for:
1629
1630 CSINC rd, xzr, xzr, !cond
1631
1632 Note that the condition needs to be inverted.
1633
1634 RD is the destination register.
1636 COND is the encoded condition. */
1637
1638 static int
1639 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1640 {
1641 /* The least significant bit of the condition needs toggling in order to
1642 invert it. */
1643 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1644 }
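
/* For example (using the aarch64_condition_codes values above): CSET rd, GE
   is emitted as CSINC rd, xzr, xzr, LT, because GE ^ 0x1 = 0xa ^ 0x1 = 0xb
   = LT.  The CSINC then yields xzr + 1 = 1 when LT is false, i.e. exactly
   when GE holds, as intended.  */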
1645
1646 /* Write LEN instructions from BUF into the inferior memory at *TO.
1647
1648 Note instructions are always little endian on AArch64, unlike data. */
1649
1650 static void
1651 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1652 {
1653 size_t byte_len = len * sizeof (uint32_t);
1654 #if (__BYTE_ORDER == __BIG_ENDIAN)
1655 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1656 size_t i;
1657
1658 for (i = 0; i < len; i++)
1659 le_buf[i] = htole32 (buf[i]);
1660
1661 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1662
1663 xfree (le_buf);
1664 #else
1665 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1666 #endif
1667
1668 *to += byte_len;
1669 }
1670
1671 /* Sub-class of struct aarch64_insn_data, storing information about
1672 instruction relocation for fast tracepoints. The visitor can
1673 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and saves
1674 the relocated instructions in the buffer pointed to by INSN_PTR. */
1675
1676 struct aarch64_insn_relocation_data
1677 {
1678 struct aarch64_insn_data base;
1679
1680 /* The new address the instruction is relocated to. */
1681 CORE_ADDR new_addr;
1682 /* Pointer to the buffer of relocated instruction(s). */
1683 uint32_t *insn_ptr;
1684 };
1685
1686 /* Implementation of aarch64_insn_visitor method "b". */
1687
1688 static void
1689 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1690 struct aarch64_insn_data *data)
1691 {
1692 struct aarch64_insn_relocation_data *insn_reloc
1693 = (struct aarch64_insn_relocation_data *) data;
1694 int64_t new_offset
1695 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1696
1697 if (can_encode_int32 (new_offset, 28))
1698 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1699 }
1700
1701 /* Implementation of aarch64_insn_visitor method "b_cond". */
1702
1703 static void
1704 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1705 struct aarch64_insn_data *data)
1706 {
1707 struct aarch64_insn_relocation_data *insn_reloc
1708 = (struct aarch64_insn_relocation_data *) data;
1709 int64_t new_offset
1710 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1711
1712 if (can_encode_int32 (new_offset, 21))
1713 {
1714 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1715 new_offset);
1716 }
1717 else if (can_encode_int32 (new_offset, 28))
1718 {
1719 /* The offset is out of range for a conditional branch
1720 instruction but not for an unconditional branch. We can use
1721 the following instructions instead:
1722
1723 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1724 B NOT_TAKEN ; Else jump over TAKEN and continue.
1725 TAKEN:
1726 B #(offset - 8)
1727 NOT_TAKEN:
1728
1729 */
1730
1731 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1732 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1733 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1734 }
1735 }
1736
1737 /* Implementation of aarch64_insn_visitor method "cb". */
1738
1739 static void
1740 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1741 const unsigned rn, int is64,
1742 struct aarch64_insn_data *data)
1743 {
1744 struct aarch64_insn_relocation_data *insn_reloc
1745 = (struct aarch64_insn_relocation_data *) data;
1746 int64_t new_offset
1747 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1748
1749 if (can_encode_int32 (new_offset, 21))
1750 {
1751 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1752 aarch64_register (rn, is64), new_offset);
1753 }
1754 else if (can_encode_int32 (new_offset, 28))
1755 {
1756 /* The offset is out of range for a compare and branch
1757 instruction but not for an unconditional branch. We can use
1758 the following instructions instead:
1759
1760 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1761 B NOT_TAKEN ; Else jump over TAKEN and continue.
1762 TAKEN:
1763 B #(offset - 8)
1764 NOT_TAKEN:
1765
1766 */
1767 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1768 aarch64_register (rn, is64), 8);
1769 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1770 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1771 }
1772 }
1773
1774 /* Implementation of aarch64_insn_visitor method "tb". */
1775
1776 static void
1777 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1778 const unsigned rt, unsigned bit,
1779 struct aarch64_insn_data *data)
1780 {
1781 struct aarch64_insn_relocation_data *insn_reloc
1782 = (struct aarch64_insn_relocation_data *) data;
1783 int64_t new_offset
1784 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1785
1786 if (can_encode_int32 (new_offset, 16))
1787 {
1788 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1789 aarch64_register (rt, 1), new_offset);
1790 }
1791 else if (can_encode_int32 (new_offset, 28))
1792 {
1793 /* The offset is out of range for a test bit and branch
1794 instruction but not for an unconditional branch. We can use
1795 the following instructions instead:
1796
1797 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1798 B NOT_TAKEN ; Else jump over TAKEN and continue.
1799 TAKEN:
1800 B #(offset - 8)
1801 NOT_TAKEN:
1802
1803 */
1804 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1805 aarch64_register (rt, 1), 8);
1806 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1807 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1808 new_offset - 8);
1809 }
1810 }
1811
1812 /* Implementation of aarch64_insn_visitor method "adr". */
1813
1814 static void
1815 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1816 const int is_adrp,
1817 struct aarch64_insn_data *data)
1818 {
1819 struct aarch64_insn_relocation_data *insn_reloc
1820 = (struct aarch64_insn_relocation_data *) data;
1821 /* We know exactly the address the ADR{P,} instruction will compute.
1822 We can just write it to the destination register. */
1823 CORE_ADDR address = data->insn_addr + offset;
1824
1825 if (is_adrp)
1826 {
1827 /* Clear the lower 12 bits of the offset to get the 4K page. */
1828 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1829 aarch64_register (rd, 1),
1830 address & ~0xfff);
1831 }
1832 else
1833 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1834 aarch64_register (rd, 1), address);
1835 }
1836
1837 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1838
1839 static void
1840 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1841 const unsigned rt, const int is64,
1842 struct aarch64_insn_data *data)
1843 {
1844 struct aarch64_insn_relocation_data *insn_reloc
1845 = (struct aarch64_insn_relocation_data *) data;
1846 CORE_ADDR address = data->insn_addr + offset;
1847
1848 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1849 aarch64_register (rt, 1), address);
1850
1851 /* We know exactly what address to load from, and what register we
1852 can use:
1853
1854 MOV xd, #(oldloc + offset)
1855 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1856 ...
1857
1858 LDR xd, [xd] ; or LDRSW xd, [xd]
1859
1860 */
1861
1862 if (is_sw)
1863 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1864 aarch64_register (rt, 1),
1865 aarch64_register (rt, 1),
1866 offset_memory_operand (0));
1867 else
1868 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1869 aarch64_register (rt, is64),
1870 aarch64_register (rt, 1),
1871 offset_memory_operand (0));
1872 }
1873
1874 /* Implementation of aarch64_insn_visitor method "others". */
1875
1876 static void
1877 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1878 struct aarch64_insn_data *data)
1879 {
1880 struct aarch64_insn_relocation_data *insn_reloc
1881 = (struct aarch64_insn_relocation_data *) data;
1882
1883 /* The instruction is not PC relative. Just re-emit it at the new
1884 location. */
1885 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1886 }
1887
1888 static const struct aarch64_insn_visitor visitor =
1889 {
1890 aarch64_ftrace_insn_reloc_b,
1891 aarch64_ftrace_insn_reloc_b_cond,
1892 aarch64_ftrace_insn_reloc_cb,
1893 aarch64_ftrace_insn_reloc_tb,
1894 aarch64_ftrace_insn_reloc_adr,
1895 aarch64_ftrace_insn_reloc_ldr_literal,
1896 aarch64_ftrace_insn_reloc_others,
1897 };
1898
1899 /* Implementation of linux_target_ops method
1900 "install_fast_tracepoint_jump_pad". */
1901
1902 static int
1903 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1904 CORE_ADDR tpaddr,
1905 CORE_ADDR collector,
1906 CORE_ADDR lockaddr,
1907 ULONGEST orig_size,
1908 CORE_ADDR *jump_entry,
1909 CORE_ADDR *trampoline,
1910 ULONGEST *trampoline_size,
1911 unsigned char *jjump_pad_insn,
1912 ULONGEST *jjump_pad_insn_size,
1913 CORE_ADDR *adjusted_insn_addr,
1914 CORE_ADDR *adjusted_insn_addr_end,
1915 char *err)
1916 {
1917 uint32_t buf[256];
1918 uint32_t *p = buf;
1919 int64_t offset;
1920 int i;
1921 uint32_t insn;
1922 CORE_ADDR buildaddr = *jump_entry;
1923 struct aarch64_insn_relocation_data insn_data;
1924
1925 /* We need to save the current state on the stack both to restore it
1926 later and to collect register values when the tracepoint is hit.
1927
1928 The saved registers are pushed in a layout that needs to be in sync
1929 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1930 the supply_fast_tracepoint_registers function will fill in the
1931 register cache from a pointer to saved registers on the stack we build
1932 here.
1933
1934 For simplicity, we set the size of each cell on the stack to 16 bytes.
1935 This way one cell can hold any register type, from system registers
1936 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1937 has to be 16 bytes aligned anyway.
1938
1939 Note that the CPSR register does not exist on AArch64. Instead we
1940 can access system bits describing the process state with the
1941 MRS/MSR instructions, namely the condition flags. We save them as
1942 if they are part of a CPSR register because that's how GDB
1943 interprets these system bits. At the moment, only the condition
1944 flags are saved in CPSR (NZCV).
1945
1946 Stack layout, each cell is 16 bytes (descending):
1947
1948 High *-------- SIMD&FP registers from 31 down to 0. --------*
1949 | q31 |
1950 . .
1951 . . 32 cells
1952 . .
1953 | q0 |
1954 *---- General purpose registers from 30 down to 0. ----*
1955 | x30 |
1956 . .
1957 . . 31 cells
1958 . .
1959 | x0 |
1960 *------------- Special purpose registers. -------------*
1961 | SP |
1962 | PC |
1963 | CPSR (NZCV) | 5 cells
1964 | FPSR |
1965 | FPCR | <- SP + 16
1966 *------------- collecting_t object --------------------*
1967 | TPIDR_EL0 | struct tracepoint * |
1968 Low *------------------------------------------------------*
1969
1970 After this stack is set up, we issue a call to the collector, passing
1971 it the saved registers at (SP + 16). */
1972
1973 /* Push SIMD&FP registers on the stack:
1974
1975 SUB sp, sp, #(32 * 16)
1976
1977 STP q30, q31, [sp, #(30 * 16)]
1978 ...
1979 STP q0, q1, [sp]
1980
1981 */
1982 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1983 for (i = 30; i >= 0; i -= 2)
1984 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1985
1986 /* Push general purpose registers on the stack. Note that we do not need
1987 to push x31 as it represents the xzr register and not the stack
1988 pointer in a STR instruction.
1989
1990 SUB sp, sp, #(31 * 16)
1991
1992 STR x30, [sp, #(30 * 16)]
1993 ...
1994 STR x0, [sp]
1995
1996 */
1997 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1998 for (i = 30; i >= 0; i -= 1)
1999 p += emit_str (p, aarch64_register (i, 1), sp,
2000 offset_memory_operand (i * 16));
2001
2002 /* Make space for 5 more cells.
2003
2004 SUB sp, sp, #(5 * 16)
2005
2006 */
2007 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2008
2009
2010 /* Save SP:
2011
2012 ADD x4, sp, #((32 + 31 + 5) * 16)
2013 STR x4, [sp, #(4 * 16)]
2014
2015 */
2016 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2017 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
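/* The ADD above reconstructs the value SP had before the three pushes,
   i.e. the inferior's stack pointer at the moment the tracepoint was
   hit, which is what gets collected as the SP register.  */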
2018
2019 /* Save PC (tracepoint address):
2020
2021 MOV x3, #(tpaddr)
2022 ...
2023
2024 STR x3, [sp, #(3 * 16)]
2025
2026 */
2027
2028 p += emit_mov_addr (p, x3, tpaddr);
2029 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2030
2031 /* Save CPSR (NZCV), FPSR and FPCR:
2032
2033 MRS x2, nzcv
2034 MRS x1, fpsr
2035 MRS x0, fpcr
2036
2037 STR x2, [sp, #(2 * 16)]
2038 STR x1, [sp, #(1 * 16)]
2039 STR x0, [sp, #(0 * 16)]
2040
2041 */
2042 p += emit_mrs (p, x2, NZCV);
2043 p += emit_mrs (p, x1, FPSR);
2044 p += emit_mrs (p, x0, FPCR);
2045 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2046 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2047 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2048
2049 /* Push the collecting_t object. It consists of the address of the
2050 tracepoint and an ID for the current thread. We get the latter by
2051 reading the tpidr_el0 system register. It corresponds to the
2052 NT_ARM_TLS register accessible with ptrace.
2053
2054 MOV x0, #(tpoint)
2055 ...
2056
2057 MRS x1, tpidr_el0
2058
2059 STP x0, x1, [sp, #-16]!
2060
2061 */
2062
2063 p += emit_mov_addr (p, x0, tpoint);
2064 p += emit_mrs (p, x1, TPIDR_EL0);
2065 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2066
2067 /* Spin-lock:
2068
2069 The shared memory for the lock is at lockaddr. It will hold zero
2070 if no-one is holding the lock, otherwise it contains the address of
2071 the collecting_t object on the stack of the thread which acquired it.
2072
2073 At this stage, the stack pointer points to this thread's collecting_t
2074 object.
2075
2076 We use the following registers:
2077 - x0: Address of the lock.
2078 - x1: Pointer to collecting_t object.
2079 - x2: Scratch register.
2080
2081 MOV x0, #(lockaddr)
2082 ...
2083 MOV x1, sp
2084
2085 ; Trigger an event local to this core, so that the first WFE
2086 ; below returns immediately instead of waiting.
2087 SEVL
2088 again:
2089 ; Wait for an event. The event is triggered by either the SEVL
2090 ; or STLR instructions (store release).
2091 WFE
2092
2093 ; Atomically read at lockaddr. This marks the memory location as
2094 ; exclusive. This instruction also has memory constraints which
2095 ; make sure all previous data reads and writes are done before
2096 ; executing it.
2097 LDAXR x2, [x0]
2098
2099 ; Try again if another thread holds the lock.
2100 CBNZ x2, again
2101
2102 ; We can lock it! Write the address of the collecting_t object.
2103 ; This instruction will fail if the memory location is not marked
2104 ; as exclusive anymore. If it succeeds, it will remove the
2105 ; exclusive mark on the memory location. This way, if another
2106 ; thread executes this instruction before us, we will fail and try
2107 ; all over again.
2108 STXR w2, x1, [x0]
2109 CBNZ w2, again
2110
2111 */
2112
2113 p += emit_mov_addr (p, x0, lockaddr);
2114 p += emit_mov (p, x1, register_operand (sp));
2115
2116 p += emit_sevl (p);
2117 p += emit_wfe (p);
2118 p += emit_ldaxr (p, x2, x0);
2119 p += emit_cb (p, 1, x2, -2 * 4);
2120 p += emit_stxr (p, w2, x1, x0);
2121 p += emit_cb (p, 1, w2, -4 * 4);
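/* Both CBNZ offsets are relative to the branch instruction itself:
   -2 * 4 and -4 * 4 bytes both land on the WFE instruction, which is
   the "again" label in the pseudo code above.  */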
2122
2123 /* Call collector (struct tracepoint *, unsigned char *):
2124
2125 MOV x0, #(tpoint)
2126 ...
2127
2128 ; Saved registers start after the collecting_t object.
2129 ADD x1, sp, #16
2130
2131 ; We use an intra-procedure-call scratch register.
2132 MOV ip0, #(collector)
2133 ...
2134
2135 ; And call back to C!
2136 BLR ip0
2137
2138 */
2139
2140 p += emit_mov_addr (p, x0, tpoint);
2141 p += emit_add (p, x1, sp, immediate_operand (16));
2142
2143 p += emit_mov_addr (p, ip0, collector);
2144 p += emit_blr (p, ip0);
2145
2146 /* Release the lock.
2147
2148 MOV x0, #(lockaddr)
2149 ...
2150
2151 ; This instruction is a normal store with memory ordering
2152 ; constraints. Thanks to this we do not have to put a data
2153 ; barrier instruction to make sure all data reads and writes are done
2154 ; before this instruction is executed. Furthermore, this instruction
2155 ; will trigger an event, letting other threads know they can grab
2156 ; the lock.
2157 STLR xzr, [x0]
2158
2159 */
2160 p += emit_mov_addr (p, x0, lockaddr);
2161 p += emit_stlr (p, xzr, x0);
2162
2163 /* Free collecting_t object:
2164
2165 ADD sp, sp, #16
2166
2167 */
2168 p += emit_add (p, sp, sp, immediate_operand (16));
2169
2170 /* Restore CPSR (NZCV), FPSR and FPCR, then drop the special purpose
2171 register cells from the stack.
2172
2173 LDR x2, [sp, #(2 * 16)]
2174 LDR x1, [sp, #(1 * 16)]
2175 LDR x0, [sp, #(0 * 16)]
2176
2177 MSR NZCV, x2
2178 MSR FPSR, x1
2179 MSR FPCR, x0
2180
2181 ADD sp, sp, #(5 * 16)
2182
2183 */
2184 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2185 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2186 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2187 p += emit_msr (p, NZCV, x2);
2188 p += emit_msr (p, FPSR, x1);
2189 p += emit_msr (p, FPCR, x0);
2190
2191 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2192
2193 /* Pop general purpose registers:
2194
2195 LDR x0, [sp]
2196 ...
2197 LDR x30, [sp, #(30 * 16)]
2198
2199 ADD sp, sp, #(31 * 16)
2200
2201 */
2202 for (i = 0; i <= 30; i += 1)
2203 p += emit_ldr (p, aarch64_register (i, 1), sp,
2204 offset_memory_operand (i * 16));
2205 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2206
2207 /* Pop SIMD&FP registers:
2208
2209 LDP q0, q1, [sp]
2210 ...
2211 LDP q30, q31, [sp, #(30 * 16)]
2212
2213 ADD sp, sp, #(32 * 16)
2214
2215 */
2216 for (i = 0; i <= 30; i += 2)
2217 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2218 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2219
2220 /* Write the code into the inferior memory. */
2221 append_insns (&buildaddr, p - buf, buf);
2222
2223 /* Now emit the relocated instruction. */
2224 *adjusted_insn_addr = buildaddr;
2225 target_read_uint32 (tpaddr, &insn);
2226
2227 insn_data.base.insn_addr = tpaddr;
2228 insn_data.new_addr = buildaddr;
2229 insn_data.insn_ptr = buf;
2230
2231 aarch64_relocate_instruction (insn, &visitor,
2232 (struct aarch64_insn_data *) &insn_data);
2233
2234 /* We may not have been able to relocate the instruction. */
2235 if (insn_data.insn_ptr == buf)
2236 {
2237 sprintf (err,
2238 "E.Could not relocate instruction from %s to %s.",
2239 core_addr_to_string_nz (tpaddr),
2240 core_addr_to_string_nz (buildaddr));
2241 return 1;
2242 }
2243 else
2244 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2245 *adjusted_insn_addr_end = buildaddr;
2246
2247 /* Go back to the start of the buffer. */
2248 p = buf;
2249
2250 /* Emit a branch back from the jump pad. */
2251 offset = (tpaddr + orig_size - buildaddr);
2252 if (!can_encode_int32 (offset, 28))
2253 {
2254 sprintf (err,
2255 "E.Jump back from jump pad too far from tracepoint "
2256 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2257 offset);
2258 return 1;
2259 }
2260
2261 p += emit_b (p, 0, offset);
2262 append_insns (&buildaddr, p - buf, buf);
2263
2264 /* Give the caller a branch instruction into the jump pad. */
2265 offset = (*jump_entry - tpaddr);
2266 if (!can_encode_int32 (offset, 28))
2267 {
2268 sprintf (err,
2269 "E.Jump pad too far from tracepoint "
2270 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2271 offset);
2272 return 1;
2273 }
2274
2275 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2276 *jjump_pad_insn_size = 4;
2277
2278 /* Return the end address of our pad. */
2279 *jump_entry = buildaddr;
2280
2281 return 0;
2282 }
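
/* To summarize, the jump pad built above has three parts: code that
   saves the inferior state and calls the collector, the relocated copy
   of the instruction originally at TPADDR, and a branch back to
   TPADDR + ORIG_SIZE. The caller receives, via JJUMP_PAD_INSN, the
   single B instruction to install at the tracepoint address.  */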
2283
2284 /* Helper function writing LEN instructions from START into
2285 current_insn_ptr. */
2286
2287 static void
2288 emit_ops_insns (const uint32_t *start, int len)
2289 {
2290 CORE_ADDR buildaddr = current_insn_ptr;
2291
2292 if (debug_threads)
2293 debug_printf ("Adding %d instrucions at %s\n",
2294 len, paddress (buildaddr));
2295
2296 append_insns (&buildaddr, len, start);
2297 current_insn_ptr = buildaddr;
2298 }
2299
2300 /* Pop a register from the stack. */
2301
2302 static int
2303 emit_pop (uint32_t *buf, struct aarch64_register rt)
2304 {
2305 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2306 }
2307
2308 /* Push a register on the stack. */
2309
2310 static int
2311 emit_push (uint32_t *buf, struct aarch64_register rt)
2312 {
2313 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2314 }
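
/* Note that the expression stack manipulated by emit_pop and emit_push
   above uses one 16-byte cell per entry even though the values are only
   8 bytes wide; this keeps the stack pointer 16-byte aligned at all
   times, as AArch64 requires for SP-relative accesses.  */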
2315
2316 /* Implementation of emit_ops method "emit_prologue". */
2317
2318 static void
2319 aarch64_emit_prologue (void)
2320 {
2321 uint32_t buf[16];
2322 uint32_t *p = buf;
2323
2324 /* This function emits a prologue for the following function prototype:
2325
2326 enum eval_result_type f (unsigned char *regs,
2327 ULONGEST *value);
2328
2329 The first argument is a buffer of raw registers. The second
2330 argument is a pointer through which the result of evaluating the
2331 expression is returned; it is set to whatever is on top of the
2332 stack at the end.
2333
2334 The stack set up by the prologue is as such:
2335
2336 High *------------------------------------------------------*
2337 | LR |
2338 | FP | <- FP
2339 | x1 (ULONGEST *value) |
2340 | x0 (unsigned char *regs) |
2341 Low *------------------------------------------------------*
2342
2343 As we are implementing a stack machine, each opcode can expand the
2344 stack so we never know how far we are from the data saved by this
2345 prologue. In order to be able to refer to value and regs later, we save
2346 the current stack pointer in the frame pointer. This way, it is not
2347 clobbered when calling C functions.
2348
2349 Finally, throughout every operation, we are using register x0 as the
2350 top of the stack, and x1 as a scratch register. */
2351
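/* The STP below pre-decrements SP by 32 bytes and stores x0 and x1 in
   the lower two 8-byte slots; LR and FP are then stored in the upper two
   slots and FP is pointed at its own slot, so the saved arguments can
   later be found at FP - 16 (regs) and FP - 8 (value).  */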
2352 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2353 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2354 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2355
2356 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2357
2358
2359 emit_ops_insns (buf, p - buf);
2360 }
2361
2362 /* Implementation of emit_ops method "emit_epilogue". */
2363
2364 static void
2365 aarch64_emit_epilogue (void)
2366 {
2367 uint32_t buf[16];
2368 uint32_t *p = buf;
2369
2370 /* Store the result of the expression (x0) in *value. */
2371 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2372 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2373 p += emit_str (p, x0, x1, offset_memory_operand (0));
2374
2375 /* Restore the previous state. */
2376 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2377 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2378
2379 /* Return expr_eval_no_error. */
2380 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2381 p += emit_ret (p, lr);
2382
2383 emit_ops_insns (buf, p - buf);
2384 }
2385
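/* The two-operand emitters below (add, sub, mul, the shifts and the
   bitwise and/or/xor) all follow the same pattern: the top of the stack
   lives in x0, so the second operand is popped into x1, the operation
   combines x1 with x0, and the result is left in x0 as the new top of
   the stack.  */
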
2386 /* Implementation of emit_ops method "emit_add". */
2387
2388 static void
2389 aarch64_emit_add (void)
2390 {
2391 uint32_t buf[16];
2392 uint32_t *p = buf;
2393
2394 p += emit_pop (p, x1);
2395 p += emit_add (p, x0, x1, register_operand (x0));
2396
2397 emit_ops_insns (buf, p - buf);
2398 }
2399
2400 /* Implementation of emit_ops method "emit_sub". */
2401
2402 static void
2403 aarch64_emit_sub (void)
2404 {
2405 uint32_t buf[16];
2406 uint32_t *p = buf;
2407
2408 p += emit_pop (p, x1);
2409 p += emit_sub (p, x0, x1, register_operand (x0));
2410
2411 emit_ops_insns (buf, p - buf);
2412 }
2413
2414 /* Implementation of emit_ops method "emit_mul". */
2415
2416 static void
2417 aarch64_emit_mul (void)
2418 {
2419 uint32_t buf[16];
2420 uint32_t *p = buf;
2421
2422 p += emit_pop (p, x1);
2423 p += emit_mul (p, x0, x1, x0);
2424
2425 emit_ops_insns (buf, p - buf);
2426 }
2427
2428 /* Implementation of emit_ops method "emit_lsh". */
2429
2430 static void
2431 aarch64_emit_lsh (void)
2432 {
2433 uint32_t buf[16];
2434 uint32_t *p = buf;
2435
2436 p += emit_pop (p, x1);
2437 p += emit_lslv (p, x0, x1, x0);
2438
2439 emit_ops_insns (buf, p - buf);
2440 }
2441
2442 /* Implementation of emit_ops method "emit_rsh_signed". */
2443
2444 static void
2445 aarch64_emit_rsh_signed (void)
2446 {
2447 uint32_t buf[16];
2448 uint32_t *p = buf;
2449
2450 p += emit_pop (p, x1);
2451 p += emit_asrv (p, x0, x1, x0);
2452
2453 emit_ops_insns (buf, p - buf);
2454 }
2455
2456 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2457
2458 static void
2459 aarch64_emit_rsh_unsigned (void)
2460 {
2461 uint32_t buf[16];
2462 uint32_t *p = buf;
2463
2464 p += emit_pop (p, x1);
2465 p += emit_lsrv (p, x0, x1, x0);
2466
2467 emit_ops_insns (buf, p - buf);
2468 }
2469
2470 /* Implementation of emit_ops method "emit_ext". */
2471
2472 static void
2473 aarch64_emit_ext (int arg)
2474 {
2475 uint32_t buf[16];
2476 uint32_t *p = buf;
2477
2478 p += emit_sbfx (p, x0, x0, 0, arg);
2479
2480 emit_ops_insns (buf, p - buf);
2481 }
2482
2483 /* Implementation of emit_ops method "emit_log_not". */
2484
2485 static void
2486 aarch64_emit_log_not (void)
2487 {
2488 uint32_t buf[16];
2489 uint32_t *p = buf;
2490
2491 /* If the top of the stack is 0, replace it with 1. Else replace it with
2492 0. */
2493
2494 p += emit_cmp (p, x0, immediate_operand (0));
2495 p += emit_cset (p, x0, EQ);
2496
2497 emit_ops_insns (buf, p - buf);
2498 }
2499
2500 /* Implementation of emit_ops method "emit_bit_and". */
2501
2502 static void
2503 aarch64_emit_bit_and (void)
2504 {
2505 uint32_t buf[16];
2506 uint32_t *p = buf;
2507
2508 p += emit_pop (p, x1);
2509 p += emit_and (p, x0, x0, x1);
2510
2511 emit_ops_insns (buf, p - buf);
2512 }
2513
2514 /* Implementation of emit_ops method "emit_bit_or". */
2515
2516 static void
2517 aarch64_emit_bit_or (void)
2518 {
2519 uint32_t buf[16];
2520 uint32_t *p = buf;
2521
2522 p += emit_pop (p, x1);
2523 p += emit_orr (p, x0, x0, x1);
2524
2525 emit_ops_insns (buf, p - buf);
2526 }
2527
2528 /* Implementation of emit_ops method "emit_bit_xor". */
2529
2530 static void
2531 aarch64_emit_bit_xor (void)
2532 {
2533 uint32_t buf[16];
2534 uint32_t *p = buf;
2535
2536 p += emit_pop (p, x1);
2537 p += emit_eor (p, x0, x0, x1);
2538
2539 emit_ops_insns (buf, p - buf);
2540 }
2541
2542 /* Implementation of emit_ops method "emit_bit_not". */
2543
2544 static void
2545 aarch64_emit_bit_not (void)
2546 {
2547 uint32_t buf[16];
2548 uint32_t *p = buf;
2549
2550 p += emit_mvn (p, x0, x0);
2551
2552 emit_ops_insns (buf, p - buf);
2553 }
2554
2555 /* Implementation of emit_ops method "emit_equal". */
2556
2557 static void
2558 aarch64_emit_equal (void)
2559 {
2560 uint32_t buf[16];
2561 uint32_t *p = buf;
2562
2563 p += emit_pop (p, x1);
2564 p += emit_cmp (p, x0, register_operand (x1));
2565 p += emit_cset (p, x0, EQ);
2566
2567 emit_ops_insns (buf, p - buf);
2568 }
2569
2570 /* Implementation of emit_ops method "emit_less_signed". */
2571
2572 static void
2573 aarch64_emit_less_signed (void)
2574 {
2575 uint32_t buf[16];
2576 uint32_t *p = buf;
2577
2578 p += emit_pop (p, x1);
2579 p += emit_cmp (p, x1, register_operand (x0));
2580 p += emit_cset (p, x0, LT);
2581
2582 emit_ops_insns (buf, p - buf);
2583 }
2584
2585 /* Implementation of emit_ops method "emit_less_unsigned". */
2586
2587 static void
2588 aarch64_emit_less_unsigned (void)
2589 {
2590 uint32_t buf[16];
2591 uint32_t *p = buf;
2592
2593 p += emit_pop (p, x1);
2594 p += emit_cmp (p, x1, register_operand (x0));
2595 p += emit_cset (p, x0, LO);
2596
2597 emit_ops_insns (buf, p - buf);
2598 }
2599
2600 /* Implementation of emit_ops method "emit_ref". */
2601
2602 static void
2603 aarch64_emit_ref (int size)
2604 {
2605 uint32_t buf[16];
2606 uint32_t *p = buf;
2607
2608 switch (size)
2609 {
2610 case 1:
2611 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2612 break;
2613 case 2:
2614 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2615 break;
2616 case 4:
2617 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2618 break;
2619 case 8:
2620 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2621 break;
2622 default:
2623 /* Unknown size, bail on compilation. */
2624 emit_error = 1;
2625 break;
2626 }
2627
2628 emit_ops_insns (buf, p - buf);
2629 }
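
/* Note that in aarch64_emit_ref above the address on top of the stack
   (in x0) is replaced by the value loaded from it; loads narrower than
   8 bytes write a W register and therefore zero-extend the result to
   64 bits.  */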
2630
2631 /* Implementation of emit_ops method "emit_if_goto". */
2632
2633 static void
2634 aarch64_emit_if_goto (int *offset_p, int *size_p)
2635 {
2636 uint32_t buf[16];
2637 uint32_t *p = buf;
2638
2639 /* The Z flag is set or cleared here. */
2640 p += emit_cmp (p, x0, immediate_operand (0));
2641 /* This instruction must not change the Z flag. */
2642 p += emit_pop (p, x0);
2643 /* Branch over the next instruction if x0 == 0. */
2644 p += emit_bcond (p, EQ, 8);
2645
2646 /* The NOP instruction will be patched with an unconditional branch. */
2647 if (offset_p)
2648 *offset_p = (p - buf) * 4;
2649 if (size_p)
2650 *size_p = 4;
2651 p += emit_nop (p);
2652
2653 emit_ops_insns (buf, p - buf);
2654 }
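
/* The OFFSET_P/SIZE_P values recorded by the goto emitters give the
   position and size of the placeholder NOP within the code just
   emitted; once the goto target is known, aarch64_write_goto_address
   overwrites that NOP with an unconditional B instruction.  */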
2655
2656 /* Implementation of emit_ops method "emit_goto". */
2657
2658 static void
2659 aarch64_emit_goto (int *offset_p, int *size_p)
2660 {
2661 uint32_t buf[16];
2662 uint32_t *p = buf;
2663
2664 /* The NOP instruction will be patched with an unconditional branch. */
2665 if (offset_p)
2666 *offset_p = 0;
2667 if (size_p)
2668 *size_p = 4;
2669 p += emit_nop (p);
2670
2671 emit_ops_insns (buf, p - buf);
2672 }
2673
2674 /* Implementation of emit_ops method "write_goto_address". */
2675
2676 static void
2677 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2678 {
2679 uint32_t insn;
2680
2681 emit_b (&insn, 0, to - from);
2682 append_insns (&from, 1, &insn);
2683 }
2684
2685 /* Implementation of emit_ops method "emit_const". */
2686
2687 static void
2688 aarch64_emit_const (LONGEST num)
2689 {
2690 uint32_t buf[16];
2691 uint32_t *p = buf;
2692
2693 p += emit_mov_addr (p, x0, num);
2694
2695 emit_ops_insns (buf, p - buf);
2696 }
2697
2698 /* Implementation of emit_ops method "emit_call". */
2699
2700 static void
2701 aarch64_emit_call (CORE_ADDR fn)
2702 {
2703 uint32_t buf[16];
2704 uint32_t *p = buf;
2705
2706 p += emit_mov_addr (p, ip0, fn);
2707 p += emit_blr (p, ip0);
2708
2709 emit_ops_insns (buf, p - buf);
2710 }
2711
2712 /* Implementation of emit_ops method "emit_reg". */
2713
2714 static void
2715 aarch64_emit_reg (int reg)
2716 {
2717 uint32_t buf[16];
2718 uint32_t *p = buf;
2719
2720 /* Set x0 to unsigned char *regs. */
2721 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2722 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2723 p += emit_mov (p, x1, immediate_operand (reg));
2724
2725 emit_ops_insns (buf, p - buf);
2726
2727 aarch64_emit_call (get_raw_reg_func_addr ());
2728 }
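
/* In aarch64_emit_reg above, x0 is loaded with the buffer of raw
   registers that was passed to the compiled expression and x1 with the
   register number; these become the two arguments of the register
   accessor whose address get_raw_reg_func_addr returns.  */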
2729
2730 /* Implementation of emit_ops method "emit_pop". */
2731
2732 static void
2733 aarch64_emit_pop (void)
2734 {
2735 uint32_t buf[16];
2736 uint32_t *p = buf;
2737
2738 p += emit_pop (p, x0);
2739
2740 emit_ops_insns (buf, p - buf);
2741 }
2742
2743 /* Implementation of emit_ops method "emit_stack_flush". */
2744
2745 static void
2746 aarch64_emit_stack_flush (void)
2747 {
2748 uint32_t buf[16];
2749 uint32_t *p = buf;
2750
2751 p += emit_push (p, x0);
2752
2753 emit_ops_insns (buf, p - buf);
2754 }
2755
2756 /* Implementation of emit_ops method "emit_zero_ext". */
2757
2758 static void
2759 aarch64_emit_zero_ext (int arg)
2760 {
2761 uint32_t buf[16];
2762 uint32_t *p = buf;
2763
2764 p += emit_ubfx (p, x0, x0, 0, arg);
2765
2766 emit_ops_insns (buf, p - buf);
2767 }
2768
2769 /* Implementation of emit_ops method "emit_swap". */
2770
2771 static void
2772 aarch64_emit_swap (void)
2773 {
2774 uint32_t buf[16];
2775 uint32_t *p = buf;
2776
2777 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2778 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2779 p += emit_mov (p, x0, register_operand (x1));
2780
2781 emit_ops_insns (buf, p - buf);
2782 }
2783
2784 /* Implementation of emit_ops method "emit_stack_adjust". */
2785
2786 static void
2787 aarch64_emit_stack_adjust (int n)
2788 {
2789 /* Each stack entry is one 16-byte cell, so this is a single SP adjustment. */
2790 uint32_t buf[16];
2791 uint32_t *p = buf;
2792
2793 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2794
2795 emit_ops_insns (buf, p - buf);
2796 }
2797
2798 /* Implementation of emit_ops method "emit_int_call_1". */
2799
2800 static void
2801 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2802 {
2803 uint32_t buf[16];
2804 uint32_t *p = buf;
2805
2806 p += emit_mov (p, x0, immediate_operand (arg1));
2807
2808 emit_ops_insns (buf, p - buf);
2809
2810 aarch64_emit_call (fn);
2811 }
2812
2813 /* Implementation of emit_ops method "emit_void_call_2". */
2814
2815 static void
2816 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2817 {
2818 uint32_t buf[16];
2819 uint32_t *p = buf;
2820
2821 /* Push x0 on the stack. */
2822 aarch64_emit_stack_flush ();
2823
2824 /* Set up arguments for the function call:
2825
2826 x0: arg1
2827 x1: top of the stack
2828
2829 MOV x1, x0
2830 MOV x0, #arg1 */
2831
2832 p += emit_mov (p, x1, register_operand (x0));
2833 p += emit_mov (p, x0, immediate_operand (arg1));
2834
2835 emit_ops_insns (buf, p - buf);
2836
2837 aarch64_emit_call (fn);
2838
2839 /* Restore x0. */
2840 aarch64_emit_pop ();
2841 }
2842
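/* Each conditional goto emitter below compares the two topmost stack
   entries (x1, popped, against x0) and branches over a placeholder NOP
   when the goto should not be taken; the NOP is later patched by
   aarch64_write_goto_address with a branch to the goto target.  */
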
2843 /* Implementation of emit_ops method "emit_eq_goto". */
2844
2845 static void
2846 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2847 {
2848 uint32_t buf[16];
2849 uint32_t *p = buf;
2850
2851 p += emit_pop (p, x1);
2852 p += emit_cmp (p, x1, register_operand (x0));
2853 /* Branch over the next instruction if x0 != x1. */
2854 p += emit_bcond (p, NE, 8);
2855 /* The NOP instruction will be patched with an unconditional branch. */
2856 if (offset_p)
2857 *offset_p = (p - buf) * 4;
2858 if (size_p)
2859 *size_p = 4;
2860 p += emit_nop (p);
2861
2862 emit_ops_insns (buf, p - buf);
2863 }
2864
2865 /* Implementation of emit_ops method "emit_ne_goto". */
2866
2867 static void
2868 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2869 {
2870 uint32_t buf[16];
2871 uint32_t *p = buf;
2872
2873 p += emit_pop (p, x1);
2874 p += emit_cmp (p, x1, register_operand (x0));
2875 /* Branch over the next instruction if x0 == x1. */
2876 p += emit_bcond (p, EQ, 8);
2877 /* The NOP instruction will be patched with an unconditional branch. */
2878 if (offset_p)
2879 *offset_p = (p - buf) * 4;
2880 if (size_p)
2881 *size_p = 4;
2882 p += emit_nop (p);
2883
2884 emit_ops_insns (buf, p - buf);
2885 }
2886
2887 /* Implementation of emit_ops method "emit_lt_goto". */
2888
2889 static void
2890 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2891 {
2892 uint32_t buf[16];
2893 uint32_t *p = buf;
2894
2895 p += emit_pop (p, x1);
2896 p += emit_cmp (p, x1, register_operand (x0));
2897 /* Branch over the next instruction if x1 >= x0. */
2898 p += emit_bcond (p, GE, 8);
2899 /* The NOP instruction will be patched with an unconditional branch. */
2900 if (offset_p)
2901 *offset_p = (p - buf) * 4;
2902 if (size_p)
2903 *size_p = 4;
2904 p += emit_nop (p);
2905
2906 emit_ops_insns (buf, p - buf);
2907 }
2908
2909 /* Implementation of emit_ops method "emit_le_goto". */
2910
2911 static void
2912 aarch64_emit_le_goto (int *offset_p, int *size_p)
2913 {
2914 uint32_t buf[16];
2915 uint32_t *p = buf;
2916
2917 p += emit_pop (p, x1);
2918 p += emit_cmp (p, x1, register_operand (x0));
2919 /* Branch over the next instruction if x1 > x0. */
2920 p += emit_bcond (p, GT, 8);
2921 /* The NOP instruction will be patched with an unconditional branch. */
2922 if (offset_p)
2923 *offset_p = (p - buf) * 4;
2924 if (size_p)
2925 *size_p = 4;
2926 p += emit_nop (p);
2927
2928 emit_ops_insns (buf, p - buf);
2929 }
2930
2931 /* Implementation of emit_ops method "emit_gt_goto". */
2932
2933 static void
2934 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2935 {
2936 uint32_t buf[16];
2937 uint32_t *p = buf;
2938
2939 p += emit_pop (p, x1);
2940 p += emit_cmp (p, x1, register_operand (x0));
2941 /* Branch over the next instruction if x1 <= x0. */
2942 p += emit_bcond (p, LE, 8);
2943 /* The NOP instruction will be patched with an unconditional branch. */
2944 if (offset_p)
2945 *offset_p = (p - buf) * 4;
2946 if (size_p)
2947 *size_p = 4;
2948 p += emit_nop (p);
2949
2950 emit_ops_insns (buf, p - buf);
2951 }
2952
2953 /* Implementation of emit_ops method "emit_ge_got". */
2954
2955 static void
2956 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2957 {
2958 uint32_t buf[16];
2959 uint32_t *p = buf;
2960
2961 p += emit_pop (p, x1);
2962 p += emit_cmp (p, x1, register_operand (x0));
2963 /* Branch over the next instruction if x1 < x0. */
2964 p += emit_bcond (p, LT, 8);
2965 /* The NOP instruction will be patched with an unconditional branch. */
2966 if (offset_p)
2967 *offset_p = (p - buf) * 4;
2968 if (size_p)
2969 *size_p = 4;
2970 p += emit_nop (p);
2971
2972 emit_ops_insns (buf, p - buf);
2973 }
2974
2975 static struct emit_ops aarch64_emit_ops_impl =
2976 {
2977 aarch64_emit_prologue,
2978 aarch64_emit_epilogue,
2979 aarch64_emit_add,
2980 aarch64_emit_sub,
2981 aarch64_emit_mul,
2982 aarch64_emit_lsh,
2983 aarch64_emit_rsh_signed,
2984 aarch64_emit_rsh_unsigned,
2985 aarch64_emit_ext,
2986 aarch64_emit_log_not,
2987 aarch64_emit_bit_and,
2988 aarch64_emit_bit_or,
2989 aarch64_emit_bit_xor,
2990 aarch64_emit_bit_not,
2991 aarch64_emit_equal,
2992 aarch64_emit_less_signed,
2993 aarch64_emit_less_unsigned,
2994 aarch64_emit_ref,
2995 aarch64_emit_if_goto,
2996 aarch64_emit_goto,
2997 aarch64_write_goto_address,
2998 aarch64_emit_const,
2999 aarch64_emit_call,
3000 aarch64_emit_reg,
3001 aarch64_emit_pop,
3002 aarch64_emit_stack_flush,
3003 aarch64_emit_zero_ext,
3004 aarch64_emit_swap,
3005 aarch64_emit_stack_adjust,
3006 aarch64_emit_int_call_1,
3007 aarch64_emit_void_call_2,
3008 aarch64_emit_eq_goto,
3009 aarch64_emit_ne_goto,
3010 aarch64_emit_lt_goto,
3011 aarch64_emit_le_goto,
3012 aarch64_emit_gt_goto,
3013 aarch64_emit_ge_goto,
3014 };
3015
3016 /* Implementation of linux_target_ops method "emit_ops". */
3017
3018 static struct emit_ops *
3019 aarch64_emit_ops (void)
3020 {
3021 return &aarch64_emit_ops_impl;
3022 }
3023
3024 /* Implementation of linux_target_ops method
3025 "get_min_fast_tracepoint_insn_len". */
3026
3027 static int
3028 aarch64_get_min_fast_tracepoint_insn_len (void)
3029 {
3030 return 4;
3031 }
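
/* Four bytes is the size of every A64 instruction, so a fast tracepoint
   can be placed on any instruction: the single B into the jump pad that
   replaces it never needs more room than the instruction it displaces.  */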
3032
3033 /* Implementation of linux_target_ops method "supports_range_stepping". */
3034
3035 static int
3036 aarch64_supports_range_stepping (void)
3037 {
3038 return 1;
3039 }
3040
3041 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3042
3043 static const gdb_byte *
3044 aarch64_sw_breakpoint_from_kind (int kind, int *size)
3045 {
3046 if (is_64bit_tdesc ())
3047 {
3048 *size = aarch64_breakpoint_len;
3049 return aarch64_breakpoint;
3050 }
3051 else
3052 return arm_sw_breakpoint_from_kind (kind, size);
3053 }
3054
3055 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3056
3057 static int
3058 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3059 {
3060 if (is_64bit_tdesc ())
3061 return aarch64_breakpoint_len;
3062 else
3063 return arm_breakpoint_kind_from_pc (pcptr);
3064 }
3065
3066 /* Implementation of the linux_target_ops method
3067 "breakpoint_kind_from_current_state". */
3068
3069 static int
3070 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3071 {
3072 if (is_64bit_tdesc ())
3073 return aarch64_breakpoint_len;
3074 else
3075 return arm_breakpoint_kind_from_current_state (pcptr);
3076 }
3077
3078 /* Support for hardware single step. */
3079
3080 static int
3081 aarch64_supports_hardware_single_step (void)
3082 {
3083 return 1;
3084 }
3085
3086 struct linux_target_ops the_low_target =
3087 {
3088 aarch64_get_pc,
3089 aarch64_set_pc,
3090 aarch64_breakpoint_kind_from_pc,
3091 aarch64_sw_breakpoint_from_kind,
3092 NULL, /* get_next_pcs */
3093 0, /* decr_pc_after_break */
3094 aarch64_breakpoint_at,
3095 aarch64_supports_z_point_type,
3096 aarch64_insert_point,
3097 aarch64_remove_point,
3098 aarch64_stopped_by_watchpoint,
3099 aarch64_stopped_data_address,
3100 NULL, /* collect_ptrace_register */
3101 NULL, /* supply_ptrace_register */
3102 aarch64_linux_siginfo_fixup,
3103 aarch64_linux_new_process,
3104 aarch64_linux_delete_process,
3105 aarch64_linux_new_thread,
3106 aarch64_linux_delete_thread,
3107 aarch64_linux_new_fork,
3108 aarch64_linux_prepare_to_resume,
3109 NULL, /* process_qsupported */
3110 aarch64_supports_tracepoints,
3111 aarch64_get_thread_area,
3112 aarch64_install_fast_tracepoint_jump_pad,
3113 aarch64_emit_ops,
3114 aarch64_get_min_fast_tracepoint_insn_len,
3115 aarch64_supports_range_stepping,
3116 aarch64_breakpoint_kind_from_current_state,
3117 aarch64_supports_hardware_single_step,
3118 aarch64_get_syscall_trapinfo,
3119 };
3120
3121 /* The linux target ops object. */
3122
3123 linux_process_target *the_linux_target = &the_aarch64_target;
3124
3125 void
3126 initialize_low_arch (void)
3127 {
3128 initialize_low_arch_aarch32 ();
3129
3130 initialize_regsets_info (&aarch64_regsets_info);
3131 initialize_regsets_info (&aarch64_sve_regsets_info);
3132 }