Fix TBI handling for watchpoints
gdbserver/linux-aarch64-low.cc (binutils-gdb.git)
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 const regs_info *get_regs_info () override;
59
60 int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;
61
62 int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;
63
64 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
65
66 bool supports_z_point_type (char z_type) override;
67
68 bool supports_tracepoints () override;
69
70 bool supports_fast_tracepoints () override;
71
72 int install_fast_tracepoint_jump_pad
73 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
74 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
75 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
76 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
77 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
78 char *err) override;
79
80 int get_min_fast_tracepoint_insn_len () override;
81
82 struct emit_ops *emit_ops () override;
83
84 protected:
85
86 void low_arch_setup () override;
87
88 bool low_cannot_fetch_register (int regno) override;
89
90 bool low_cannot_store_register (int regno) override;
91
92 bool low_supports_breakpoints () override;
93
94 CORE_ADDR low_get_pc (regcache *regcache) override;
95
96 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
97
98 bool low_breakpoint_at (CORE_ADDR pc) override;
99
100 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
101 int size, raw_breakpoint *bp) override;
102
103 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
104 int size, raw_breakpoint *bp) override;
105
106 bool low_stopped_by_watchpoint () override;
107
108 CORE_ADDR low_stopped_data_address () override;
109
110 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
111 int direction) override;
112
113 arch_process_info *low_new_process () override;
114
115 void low_delete_process (arch_process_info *info) override;
116
117 void low_new_thread (lwp_info *) override;
118
119 void low_delete_thread (arch_lwp_info *) override;
120
121 void low_new_fork (process_info *parent, process_info *child) override;
122
123 void low_prepare_to_resume (lwp_info *lwp) override;
124
125 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
126
127 bool low_supports_range_stepping () override;
128
129 bool low_supports_catch_syscall () override;
130
131 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
132 };
133
134 /* The singleton target ops object. */
135
136 static aarch64_target the_aarch64_target;
137
138 bool
139 aarch64_target::low_cannot_fetch_register (int regno)
140 {
141 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
142 "is not implemented by the target");
143 }
144
145 bool
146 aarch64_target::low_cannot_store_register (int regno)
147 {
148 gdb_assert_not_reached ("linux target op low_cannot_store_register "
149 "is not implemented by the target");
150 }
151
152 void
153 aarch64_target::low_prepare_to_resume (lwp_info *lwp)
154 {
155 aarch64_linux_prepare_to_resume (lwp);
156 }
157
158 /* Per-process arch-specific data we want to keep. */
159
160 struct arch_process_info
161 {
162 /* Hardware breakpoint/watchpoint data.
163 The reason for them to be per-process rather than per-thread is
164 due to the lack of information in the gdbserver environment;
165 	   gdbserver is not told whether a requested hardware
166 	   breakpoint/watchpoint is thread specific, so it has to set
167 	   each hw bp/wp for every thread in the current process.  The
168 	   higher-level bp/wp management in GDB will resume a thread if a hw
169 	   bp/wp trap is not expected for it.  Since the hw bp/wp setting is
170 	   the same for each thread, it is reasonable for the data to live here.
171 */
172 struct aarch64_debug_reg_state debug_reg_state;
173 };
174
175 /* Return true if the size of register 0 is 8 bytes.  */
176
177 static int
178 is_64bit_tdesc (void)
179 {
180 struct regcache *regcache = get_thread_regcache (current_thread, 0);
181
182 return register_size (regcache->tdesc, 0) == 8;
183 }
184
185 /* Return true if the regcache's target description contains the SVE feature.  */
186
187 static bool
188 is_sve_tdesc (void)
189 {
190 struct regcache *regcache = get_thread_regcache (current_thread, 0);
191
192 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
193 }
194
195 static void
196 aarch64_fill_gregset (struct regcache *regcache, void *buf)
197 {
198 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
199 int i;
200
201 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
202 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
203 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
204 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
205 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
206 }
207
208 static void
209 aarch64_store_gregset (struct regcache *regcache, const void *buf)
210 {
211 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
212 int i;
213
214 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
215 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
216 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
217 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
218 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
219 }
220
221 static void
222 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
223 {
224 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
225 int i;
226
227 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
228 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
229 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
230 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
231 }
232
233 static void
234 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
235 {
236 const struct user_fpsimd_state *regset
237 = (const struct user_fpsimd_state *) buf;
238 int i;
239
240 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
241 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
242 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
243 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
244 }
245
246 /* Store the pauth registers to regcache. */
247
248 static void
249 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
250 {
251 uint64_t *pauth_regset = (uint64_t *) buf;
252 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
253
254 if (pauth_base == 0)
255 return;
256
257 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
258 &pauth_regset[0]);
259 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
260 &pauth_regset[1]);
261 }
262
263 bool
264 aarch64_target::low_supports_breakpoints ()
265 {
266 return true;
267 }
268
269 /* Implementation of linux target ops method "low_get_pc". */
270
271 CORE_ADDR
272 aarch64_target::low_get_pc (regcache *regcache)
273 {
274 if (register_size (regcache->tdesc, 0) == 8)
275 return linux_get_pc_64bit (regcache);
276 else
277 return linux_get_pc_32bit (regcache);
278 }
279
280 /* Implementation of linux target ops method "low_set_pc". */
281
282 void
283 aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
284 {
285 if (register_size (regcache->tdesc, 0) == 8)
286 linux_set_pc_64bit (regcache, pc);
287 else
288 linux_set_pc_32bit (regcache, pc);
289 }
290
291 #define aarch64_breakpoint_len 4
292
293 /* AArch64 BRK software debug mode instruction.
294 This instruction needs to match gdb/aarch64-tdep.c
295 (aarch64_default_breakpoint). */
296 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
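/* Note: these are the little-endian bytes of the A64 instruction
   0xd4200000, i.e. "BRK #0".  */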
297
298 /* Implementation of linux target ops method "low_breakpoint_at". */
299
300 bool
301 aarch64_target::low_breakpoint_at (CORE_ADDR where)
302 {
303 if (is_64bit_tdesc ())
304 {
305 gdb_byte insn[aarch64_breakpoint_len];
306
307 read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
308 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
309 return true;
310
311 return false;
312 }
313 else
314 return arm_breakpoint_at (where);
315 }
316
317 static void
318 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
319 {
320 int i;
321
322 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
323 {
324 state->dr_addr_bp[i] = 0;
325 state->dr_ctrl_bp[i] = 0;
326 state->dr_ref_count_bp[i] = 0;
327 }
328
329 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
330 {
331 state->dr_addr_wp[i] = 0;
332 state->dr_ctrl_wp[i] = 0;
333 state->dr_ref_count_wp[i] = 0;
334 }
335 }
336
337 /* Return the pointer to the debug register state structure in the
338 current process' arch-specific data area. */
339
340 struct aarch64_debug_reg_state *
341 aarch64_get_debug_reg_state (pid_t pid)
342 {
343 struct process_info *proc = find_process_pid (pid);
344
345 return &proc->priv->arch_private->debug_reg_state;
346 }
347
348 /* Implementation of target ops method "supports_z_point_type". */
349
350 bool
351 aarch64_target::supports_z_point_type (char z_type)
352 {
353 switch (z_type)
354 {
355 case Z_PACKET_SW_BP:
356 case Z_PACKET_HW_BP:
357 case Z_PACKET_WRITE_WP:
358 case Z_PACKET_READ_WP:
359 case Z_PACKET_ACCESS_WP:
360 return true;
361 default:
362 return false;
363 }
364 }
365
366 /* Implementation of linux target ops method "low_insert_point".
367
368 It actually only records the info of the to-be-inserted bp/wp;
369 the actual insertion will happen when threads are resumed. */
370
371 int
372 aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
373 int len, raw_breakpoint *bp)
374 {
375 int ret;
376 enum target_hw_bp_type targ_type;
377 struct aarch64_debug_reg_state *state
378 = aarch64_get_debug_reg_state (pid_of (current_thread));
379
380 if (show_debug_regs)
381 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
382 (unsigned long) addr, len);
383
384 /* Determine the type from the raw breakpoint type. */
385 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
386
387 if (targ_type != hw_execute)
388 {
389 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
390 ret = aarch64_handle_watchpoint (targ_type, addr, len,
391 1 /* is_insert */, state);
392 else
393 ret = -1;
394 }
395 else
396 {
397 if (len == 3)
398 {
399 	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
400 	     instruction.  Set it to 2 to correctly encode the length bit
401 	     mask in the hardware breakpoint/watchpoint control register.  */
402 len = 2;
403 }
404 ret = aarch64_handle_breakpoint (targ_type, addr, len,
405 1 /* is_insert */, state);
406 }
407
408 if (show_debug_regs)
409 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
410 targ_type);
411
412 return ret;
413 }
414
415 /* Implementation of linux target ops method "low_remove_point".
416
417 It actually only records the info of the to-be-removed bp/wp,
418 the actual removal will be done when threads are resumed. */
419
420 int
421 aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
422 int len, raw_breakpoint *bp)
423 {
424 int ret;
425 enum target_hw_bp_type targ_type;
426 struct aarch64_debug_reg_state *state
427 = aarch64_get_debug_reg_state (pid_of (current_thread));
428
429 if (show_debug_regs)
430 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
431 (unsigned long) addr, len);
432
433 /* Determine the type from the raw breakpoint type. */
434 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
435
436 /* Set up state pointers. */
437 if (targ_type != hw_execute)
438 ret =
439 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
440 state);
441 else
442 {
443 if (len == 3)
444 {
445 	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
446 	     instruction.  Set it to 2 to correctly encode the length bit
447 	     mask in the hardware breakpoint/watchpoint control register.  */
448 len = 2;
449 }
450 ret = aarch64_handle_breakpoint (targ_type, addr, len,
451 0 /* is_insert */, state);
452 }
453
454 if (show_debug_regs)
455 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
456 targ_type);
457
458 return ret;
459 }
460
461 /* Return the address with only its significant bits kept.  This is used
462    to strip the top byte (TBI) from possibly tagged addresses.  */
463
464 static CORE_ADDR
465 address_significant (CORE_ADDR addr)
466 {
467 /* Clear insignificant bits of a target address and sign extend resulting
468 address. */
469 int addr_bit = 56;
470
471 CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);
472 addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
473 addr = (addr ^ sign) - sign;
474
475 return addr;
476 }
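
/* For example (values purely illustrative), a pointer tagged with 0xab in
   its top byte is reduced to its architectural address, while a
   kernel-style address keeps its sign extension:

     address_significant (0xab000000deadbeef)  =>  0x00000000deadbeef
     address_significant (0xffff0000deadbeef)  =>  0xffff0000deadbeef  */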
477
478 /* Implementation of linux target ops method "low_stopped_data_address". */
479
480 CORE_ADDR
481 aarch64_target::low_stopped_data_address ()
482 {
483 siginfo_t siginfo;
484 int pid, i;
485 struct aarch64_debug_reg_state *state;
486
487 pid = lwpid_of (current_thread);
488
489 /* Get the siginfo. */
490 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
491 return (CORE_ADDR) 0;
492
493 /* Need to be a hardware breakpoint/watchpoint trap. */
494 if (siginfo.si_signo != SIGTRAP
495 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
496 return (CORE_ADDR) 0;
497
498 /* Make sure to ignore the top byte, otherwise we may not recognize a
499 hardware watchpoint hit. The stopped data addresses coming from the
500 kernel can potentially be tagged addresses. */
501 const CORE_ADDR addr_trap
502 = address_significant ((CORE_ADDR) siginfo.si_addr);
503
504 /* Check if the address matches any watched address. */
505 state = aarch64_get_debug_reg_state (pid_of (current_thread));
506 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
507 {
508 const unsigned int offset
509 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
510 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
511 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
512 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
513 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
514
515 if (state->dr_ref_count_wp[i]
516 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
517 && addr_trap >= addr_watch_aligned
518 && addr_trap < addr_watch + len)
519 {
520 /* ADDR_TRAP reports the first address of the memory range
521 accessed by the CPU, regardless of what was the memory
522 range watched. Thus, a large CPU access that straddles
523 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
524 ADDR_TRAP that is lower than the
525 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
526
527 addr: | 4 | 5 | 6 | 7 | 8 |
528 |---- range watched ----|
529 |----------- range accessed ------------|
530
531 In this case, ADDR_TRAP will be 4.
532
533 To match a watchpoint known to GDB core, we must never
534 	     report an address outside of any ADDR_WATCH..ADDR_WATCH+LEN
535 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
536 positive on kernels older than 4.10. See PR
537 external/20207. */
538 return addr_orig;
539 }
540 }
541
542 return (CORE_ADDR) 0;
543 }
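
/* A concrete illustration of the matching logic above (all numbers invented
   for the example): for a 2-byte watchpoint at 0x1006 the register mirror
   typically holds the 8-byte-aligned base, so ADDR_WATCH = 0x1006,
   ADDR_WATCH_ALIGNED = 0x1000 and ADDR_ORIG = 0x1006.  A 16-byte store
   covering 0x1000..0x100f may report a (possibly tagged) SI_ADDR of 0x1000;
   after address_significant () the trap address 0x1000 still satisfies
   ADDR_WATCH_ALIGNED <= 0x1000 < ADDR_WATCH + LEN, so the watchpoint is
   matched and ADDR_ORIG is reported back to GDB.  */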
544
545 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
546
547 bool
548 aarch64_target::low_stopped_by_watchpoint ()
549 {
550 return (low_stopped_data_address () != 0);
551 }
552
553 /* Fetch the thread-local storage pointer for libthread_db. */
554
555 ps_err_e
556 ps_get_thread_area (struct ps_prochandle *ph,
557 lwpid_t lwpid, int idx, void **base)
558 {
559 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
560 is_64bit_tdesc ());
561 }
562
563 /* Implementation of linux target ops method "low_siginfo_fixup". */
564
565 bool
566 aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
567 int direction)
568 {
569   /* Is the inferior 32-bit?  If so, then fix up the siginfo object.  */
570 if (!is_64bit_tdesc ())
571 {
572 if (direction == 0)
573 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
574 native);
575 else
576 aarch64_siginfo_from_compat_siginfo (native,
577 (struct compat_siginfo *) inf);
578
579 return true;
580 }
581
582 return false;
583 }
584
585 /* Implementation of linux target ops method "low_new_process". */
586
587 arch_process_info *
588 aarch64_target::low_new_process ()
589 {
590 struct arch_process_info *info = XCNEW (struct arch_process_info);
591
592 aarch64_init_debug_reg_state (&info->debug_reg_state);
593
594 return info;
595 }
596
597 /* Implementation of linux target ops method "low_delete_process". */
598
599 void
600 aarch64_target::low_delete_process (arch_process_info *info)
601 {
602 xfree (info);
603 }
604
605 void
606 aarch64_target::low_new_thread (lwp_info *lwp)
607 {
608 aarch64_linux_new_thread (lwp);
609 }
610
611 void
612 aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
613 {
614 aarch64_linux_delete_thread (arch_lwp);
615 }
616
617 /* Implementation of linux target ops method "low_new_fork". */
618
619 void
620 aarch64_target::low_new_fork (process_info *parent,
621 process_info *child)
622 {
623 /* These are allocated by linux_add_process. */
624 gdb_assert (parent->priv != NULL
625 && parent->priv->arch_private != NULL);
626 gdb_assert (child->priv != NULL
627 && child->priv->arch_private != NULL);
628
629   /* Linux kernels before 2.6.33 commit
630      72f674d203cd230426437cdcf7dd6f681dad8b0d
631      make the child inherit the hardware debug registers from the parent
632      on fork/vfork/clone.  Newer Linux kernels create such tasks with
633      zeroed debug registers.
634 
635      GDB core assumes the child inherits the watchpoints/hw
636      breakpoints of the parent, and will remove them all from the
637      forked off process.  Copy the debug register mirrors into the
638      new process so that all breakpoints and watchpoints can be
639      removed together.  The debug register mirrors are zeroed
640      in the end, before detaching the forked off process, thus making
641      this compatible with older Linux kernels too.  */
642
643 *child->priv->arch_private = *parent->priv->arch_private;
644 }
645
646 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
647 #define AARCH64_HWCAP_PACA (1 << 30)
648
649 /* Implementation of linux target ops method "low_arch_setup". */
650
651 void
652 aarch64_target::low_arch_setup ()
653 {
654 unsigned int machine;
655 int is_elf64;
656 int tid;
657
658 tid = lwpid_of (current_thread);
659
660 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
661
662 if (is_elf64)
663 {
664 uint64_t vq = aarch64_sve_get_vq (tid);
665 unsigned long hwcap = linux_get_hwcap (8);
666 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
667
668 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
669 }
670 else
671 current_process ()->tdesc = aarch32_linux_read_description ();
672
673 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
674 }
675
676 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
677
678 static void
679 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
680 {
681 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
682 }
683
684 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
685
686 static void
687 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
688 {
689 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
690 }
691
692 static struct regset_info aarch64_regsets[] =
693 {
694 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
695 sizeof (struct user_pt_regs), GENERAL_REGS,
696 aarch64_fill_gregset, aarch64_store_gregset },
697 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
698 sizeof (struct user_fpsimd_state), FP_REGS,
699 aarch64_fill_fpregset, aarch64_store_fpregset
700 },
701 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
702 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
703 NULL, aarch64_store_pauthregset },
704 NULL_REGSET
705 };
706
707 static struct regsets_info aarch64_regsets_info =
708 {
709 aarch64_regsets, /* regsets */
710 0, /* num_regsets */
711 NULL, /* disabled_regsets */
712 };
713
714 static struct regs_info regs_info_aarch64 =
715 {
716 NULL, /* regset_bitmap */
717 NULL, /* usrregs */
718 &aarch64_regsets_info,
719 };
720
721 static struct regset_info aarch64_sve_regsets[] =
722 {
723 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
724 sizeof (struct user_pt_regs), GENERAL_REGS,
725 aarch64_fill_gregset, aarch64_store_gregset },
726 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
727 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
728 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
729 },
730 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
731 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
732 NULL, aarch64_store_pauthregset },
733 NULL_REGSET
734 };
735
736 static struct regsets_info aarch64_sve_regsets_info =
737 {
738 aarch64_sve_regsets, /* regsets. */
739 0, /* num_regsets. */
740 NULL, /* disabled_regsets. */
741 };
742
743 static struct regs_info regs_info_aarch64_sve =
744 {
745 NULL, /* regset_bitmap. */
746 NULL, /* usrregs. */
747 &aarch64_sve_regsets_info,
748 };
749
750 /* Implementation of linux target ops method "get_regs_info". */
751
752 const regs_info *
753 aarch64_target::get_regs_info ()
754 {
755 if (!is_64bit_tdesc ())
756 return &regs_info_aarch32;
757
758 if (is_sve_tdesc ())
759 return &regs_info_aarch64_sve;
760
761 return &regs_info_aarch64;
762 }
763
764 /* Implementation of target ops method "supports_tracepoints". */
765
766 bool
767 aarch64_target::supports_tracepoints ()
768 {
769 if (current_thread == NULL)
770 return true;
771 else
772 {
773 /* We don't support tracepoints on aarch32 now. */
774 return is_64bit_tdesc ();
775 }
776 }
777
778 /* Implementation of linux target ops method "low_get_thread_area". */
779
780 int
781 aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
782 {
783 struct iovec iovec;
784 uint64_t reg;
785
786 iovec.iov_base = &reg;
787 iovec.iov_len = sizeof (reg);
788
789 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
790 return -1;
791
792 *addrp = reg;
793
794 return 0;
795 }
796
797 bool
798 aarch64_target::low_supports_catch_syscall ()
799 {
800 return true;
801 }
802
803 /* Implementation of linux target ops method "low_get_syscall_trapinfo". */
804
805 void
806 aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
807 {
808 int use_64bit = register_size (regcache->tdesc, 0) == 8;
809
810 if (use_64bit)
811 {
812 long l_sysno;
813
814 collect_register_by_name (regcache, "x8", &l_sysno);
815 *sysno = (int) l_sysno;
816 }
817 else
818 collect_register_by_name (regcache, "r7", sysno);
819 }
820
821 /* List of condition codes that we need. */
822
823 enum aarch64_condition_codes
824 {
825 EQ = 0x0,
826 NE = 0x1,
827 LO = 0x3,
828 GE = 0xa,
829 LT = 0xb,
830 GT = 0xc,
831 LE = 0xd,
832 };
833
834 enum aarch64_operand_type
835 {
836 OPERAND_IMMEDIATE,
837 OPERAND_REGISTER,
838 };
839
840 /* Representation of an operand. At this time, it only supports register
841 and immediate types. */
842
843 struct aarch64_operand
844 {
845 /* Type of the operand. */
846 enum aarch64_operand_type type;
847
848 /* Value of the operand according to the type. */
849 union
850 {
851 uint32_t imm;
852 struct aarch64_register reg;
853 };
854 };
855
856 /* List of registers that we are currently using; we can add more here as
857    we need to use them.  */
858
859 /* General purpose scratch registers (64 bit). */
860 static const struct aarch64_register x0 = { 0, 1 };
861 static const struct aarch64_register x1 = { 1, 1 };
862 static const struct aarch64_register x2 = { 2, 1 };
863 static const struct aarch64_register x3 = { 3, 1 };
864 static const struct aarch64_register x4 = { 4, 1 };
865
866 /* General purpose scratch registers (32 bit). */
867 static const struct aarch64_register w0 = { 0, 0 };
868 static const struct aarch64_register w2 = { 2, 0 };
869
870 /* Intra-procedure scratch registers. */
871 static const struct aarch64_register ip0 = { 16, 1 };
872
873 /* Special purpose registers. */
874 static const struct aarch64_register fp = { 29, 1 };
875 static const struct aarch64_register lr = { 30, 1 };
876 static const struct aarch64_register sp = { 31, 1 };
877 static const struct aarch64_register xzr = { 31, 1 };
878
879 /* Build an aarch64_register value at run time.  If we know the
880    register statically, we should make it a global as above instead of
881    using this helper function.  */
882
883 static struct aarch64_register
884 aarch64_register (unsigned num, int is64)
885 {
886 return (struct aarch64_register) { num, is64 };
887 }
888
889 /* Helper function to create a register operand, for instructions with
890 different types of operands.
891
892 For example:
893 p += emit_mov (p, x0, register_operand (x1)); */
894
895 static struct aarch64_operand
896 register_operand (struct aarch64_register reg)
897 {
898 struct aarch64_operand operand;
899
900 operand.type = OPERAND_REGISTER;
901 operand.reg = reg;
902
903 return operand;
904 }
905
906 /* Helper function to create an immediate operand, for instructions with
907 different types of operands.
908
909 For example:
910 p += emit_mov (p, x0, immediate_operand (12)); */
911
912 static struct aarch64_operand
913 immediate_operand (uint32_t imm)
914 {
915 struct aarch64_operand operand;
916
917 operand.type = OPERAND_IMMEDIATE;
918 operand.imm = imm;
919
920 return operand;
921 }
922
923 /* Helper function to create an offset memory operand.
924
925 For example:
926 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
927
928 static struct aarch64_memory_operand
929 offset_memory_operand (int32_t offset)
930 {
931 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
932 }
933
934 /* Helper function to create a pre-index memory operand.
935
936 For example:
937 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
938
939 static struct aarch64_memory_operand
940 preindex_memory_operand (int32_t index)
941 {
942 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
943 }
944
945 /* Helper function to create a post-index memory operand.
946
947 For example:
948 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
949
950 static struct aarch64_memory_operand
951 postindex_memory_operand (int32_t index)
952 {
953 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
954 }
955
956 /* System control registers.  These special registers can be read with
957    the MRS instruction and written with the MSR instruction.
958 
959    - NZCV: Condition flags.  GDB refers to this register under the CPSR
960      name.
961    - FPSR: Floating-point status register.
962    - FPCR: Floating-point control register.
963    - TPIDR_EL0: Software thread ID register.  */
964
965 enum aarch64_system_control_registers
966 {
967 /* op0 op1 crn crm op2 */
968 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
969 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
970 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
971 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
972 };
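
/* These values plug directly into the MRS/MSR emitters defined later in
   this file.  For instance (illustrative usage; x0 chosen arbitrarily):

     p += emit_mrs (p, x0, NZCV);   saves the condition flags into x0
     p += emit_msr (p, NZCV, x0);   restores them  */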
973
974 /* Write a BLR instruction into *BUF.
975
976 BLR rn
977
978 RN is the register to branch to. */
979
980 static int
981 emit_blr (uint32_t *buf, struct aarch64_register rn)
982 {
983 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
984 }
985
986 /* Write a RET instruction into *BUF.
987
988 RET xn
989
990 RN is the register to branch to. */
991
992 static int
993 emit_ret (uint32_t *buf, struct aarch64_register rn)
994 {
995 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
996 }
997
998 static int
999 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
1000 struct aarch64_register rt,
1001 struct aarch64_register rt2,
1002 struct aarch64_register rn,
1003 struct aarch64_memory_operand operand)
1004 {
1005 uint32_t opc;
1006 uint32_t pre_index;
1007 uint32_t write_back;
1008
1009 if (rt.is64)
1010 opc = ENCODE (2, 2, 30);
1011 else
1012 opc = ENCODE (0, 2, 30);
1013
1014 switch (operand.type)
1015 {
1016 case MEMORY_OPERAND_OFFSET:
1017 {
1018 pre_index = ENCODE (1, 1, 24);
1019 write_back = ENCODE (0, 1, 23);
1020 break;
1021 }
1022 case MEMORY_OPERAND_POSTINDEX:
1023 {
1024 pre_index = ENCODE (0, 1, 24);
1025 write_back = ENCODE (1, 1, 23);
1026 break;
1027 }
1028 case MEMORY_OPERAND_PREINDEX:
1029 {
1030 pre_index = ENCODE (1, 1, 24);
1031 write_back = ENCODE (1, 1, 23);
1032 break;
1033 }
1034 default:
1035 return 0;
1036 }
1037
1038 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
1039 | ENCODE (operand.index >> 3, 7, 15)
1040 | ENCODE (rt2.num, 5, 10)
1041 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1042 }
1043
1044 /* Write a STP instruction into *BUF.
1045
1046 STP rt, rt2, [rn, #offset]
1047 STP rt, rt2, [rn, #index]!
1048 STP rt, rt2, [rn], #index
1049
1050 RT and RT2 are the registers to store.
1051 RN is the base address register.
1052 OFFSET is the immediate to add to the base address. It is limited to a
1053 -512 .. 504 range (7 bits << 3). */
1054
1055 static int
1056 emit_stp (uint32_t *buf, struct aarch64_register rt,
1057 struct aarch64_register rt2, struct aarch64_register rn,
1058 struct aarch64_memory_operand operand)
1059 {
1060 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
1061 }
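
/* For example, the common prologue idiom of pushing the frame pointer and
   link register can be emitted as (illustrative):

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));

   which produces "STP x29, x30, [sp, #-16]!".  */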
1062
1063 /* Write a LDP instruction into *BUF.
1064
1065 LDP rt, rt2, [rn, #offset]
1066 LDP rt, rt2, [rn, #index]!
1067 LDP rt, rt2, [rn], #index
1068
1069    RT and RT2 are the registers to load.
1070 RN is the base address register.
1071 OFFSET is the immediate to add to the base address. It is limited to a
1072 -512 .. 504 range (7 bits << 3). */
1073
1074 static int
1075 emit_ldp (uint32_t *buf, struct aarch64_register rt,
1076 struct aarch64_register rt2, struct aarch64_register rn,
1077 struct aarch64_memory_operand operand)
1078 {
1079 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
1080 }
1081
1082 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1083
1084 LDP qt, qt2, [rn, #offset]
1085
1086    RT and RT2 are the Q registers to load.
1087 RN is the base address register.
1088 OFFSET is the immediate to add to the base address. It is limited to
1089 -1024 .. 1008 range (7 bits << 4). */
1090
1091 static int
1092 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1093 struct aarch64_register rn, int32_t offset)
1094 {
1095 uint32_t opc = ENCODE (2, 2, 30);
1096 uint32_t pre_index = ENCODE (1, 1, 24);
1097
1098 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
1099 | ENCODE (offset >> 4, 7, 15)
1100 | ENCODE (rt2, 5, 10)
1101 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1102 }
1103
1104 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1105
1106 STP qt, qt2, [rn, #offset]
1107
1108 RT and RT2 are the Q registers to store.
1109 RN is the base address register.
1110 OFFSET is the immediate to add to the base address. It is limited to
1111 -1024 .. 1008 range (7 bits << 4). */
1112
1113 static int
1114 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1115 struct aarch64_register rn, int32_t offset)
1116 {
1117 uint32_t opc = ENCODE (2, 2, 30);
1118 uint32_t pre_index = ENCODE (1, 1, 24);
1119
1120 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
1121 | ENCODE (offset >> 4, 7, 15)
1122 | ENCODE (rt2, 5, 10)
1123 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1124 }
1125
1126 /* Write a LDRH instruction into *BUF.
1127
1128 LDRH wt, [xn, #offset]
1129 LDRH wt, [xn, #index]!
1130 LDRH wt, [xn], #index
1131
1132    RT is the register to load.
1133 RN is the base address register.
1134 OFFSET is the immediate to add to the base address. It is limited to
1135 0 .. 32760 range (12 bits << 3). */
1136
1137 static int
1138 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1139 struct aarch64_register rn,
1140 struct aarch64_memory_operand operand)
1141 {
1142 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1143 }
1144
1145 /* Write a LDRB instruction into *BUF.
1146
1147 LDRB wt, [xn, #offset]
1148 LDRB wt, [xn, #index]!
1149 LDRB wt, [xn], #index
1150
1151    RT is the register to load.
1152 RN is the base address register.
1153 OFFSET is the immediate to add to the base address. It is limited to
1154 0 .. 32760 range (12 bits << 3). */
1155
1156 static int
1157 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1158 struct aarch64_register rn,
1159 struct aarch64_memory_operand operand)
1160 {
1161 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1162 }
1163
1164
1165
1166 /* Write a STR instruction into *BUF.
1167
1168 STR rt, [rn, #offset]
1169 STR rt, [rn, #index]!
1170 STR rt, [rn], #index
1171
1172 RT is the register to store.
1173 RN is the base address register.
1174 OFFSET is the immediate to add to the base address. It is limited to
1175 0 .. 32760 range (12 bits << 3). */
1176
1177 static int
1178 emit_str (uint32_t *buf, struct aarch64_register rt,
1179 struct aarch64_register rn,
1180 struct aarch64_memory_operand operand)
1181 {
1182 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1183 }
1184
1185 /* Helper function emitting an exclusive load or store instruction. */
1186
1187 static int
1188 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1189 enum aarch64_opcodes opcode,
1190 struct aarch64_register rs,
1191 struct aarch64_register rt,
1192 struct aarch64_register rt2,
1193 struct aarch64_register rn)
1194 {
1195 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1196 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1197 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1198 }
1199
1200 /* Write an LDAXR instruction into *BUF.
1201
1202 LDAXR rt, [xn]
1203
1204 RT is the destination register.
1205 RN is the base address register. */
1206
1207 static int
1208 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1209 struct aarch64_register rn)
1210 {
1211 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1212 xzr, rn);
1213 }
1214
1215 /* Write a STXR instruction into *BUF.
1216
1217 STXR ws, rt, [xn]
1218
1219    RS is the result register; it indicates if the store succeeded or not.
1220    RT is the register to store.
1221 RN is the base address register. */
1222
1223 static int
1224 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1225 struct aarch64_register rt, struct aarch64_register rn)
1226 {
1227 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1228 xzr, rn);
1229 }
1230
1231 /* Write a STLR instruction into *BUF.
1232
1233 STLR rt, [xn]
1234
1235 RT is the register to store.
1236 RN is the base address register. */
1237
1238 static int
1239 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1240 struct aarch64_register rn)
1241 {
1242 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1243 xzr, rn);
1244 }
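
/* Taken together, the exclusive/acquire-release emitters above are the
   building blocks of a simple spin lock (the fast tracepoint jump pad uses
   this pattern to serialize access to its collecting lock).  A minimal
   sketch, with the branch emitters elided and registers chosen arbitrarily:

     again:
       p += emit_ldaxr (p, x0, x1);                    load-acquire *x1
       p += emit_cmp (p, x0, immediate_operand (0));   is the lock free?
       ... branch back to "again" if not ...
       p += emit_stxr (p, w0, x2, x1);                 try to claim it
       ... branch back to "again" if w0 != 0 ...

   Releasing the lock is then a single store-release:

     p += emit_stlr (p, xzr, x1);  */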
1245
1246 /* Helper function for data processing instructions with register sources. */
1247
1248 static int
1249 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1250 struct aarch64_register rd,
1251 struct aarch64_register rn,
1252 struct aarch64_register rm)
1253 {
1254 uint32_t size = ENCODE (rd.is64, 1, 31);
1255
1256 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1257 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1258 }
1259
1260 /* Helper function for data processing instructions taking either a register
1261 or an immediate. */
1262
1263 static int
1264 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1265 struct aarch64_register rd,
1266 struct aarch64_register rn,
1267 struct aarch64_operand operand)
1268 {
1269 uint32_t size = ENCODE (rd.is64, 1, 31);
1270 /* The opcode is different for register and immediate source operands. */
1271 uint32_t operand_opcode;
1272
1273 if (operand.type == OPERAND_IMMEDIATE)
1274 {
1275 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1276 operand_opcode = ENCODE (8, 4, 25);
1277
1278 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1279 | ENCODE (operand.imm, 12, 10)
1280 | ENCODE (rn.num, 5, 5)
1281 | ENCODE (rd.num, 5, 0));
1282 }
1283 else
1284 {
1285 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1286 operand_opcode = ENCODE (5, 4, 25);
1287
1288 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1289 rn, operand.reg);
1290 }
1291 }
1292
1293 /* Write an ADD instruction into *BUF.
1294
1295 ADD rd, rn, #imm
1296 ADD rd, rn, rm
1297
1298 This function handles both an immediate and register add.
1299
1300 RD is the destination register.
1301 RN is the input register.
1302 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1303 OPERAND_REGISTER. */
1304
1305 static int
1306 emit_add (uint32_t *buf, struct aarch64_register rd,
1307 struct aarch64_register rn, struct aarch64_operand operand)
1308 {
1309 return emit_data_processing (buf, ADD, rd, rn, operand);
1310 }
1311
1312 /* Write a SUB instruction into *BUF.
1313
1314 SUB rd, rn, #imm
1315 SUB rd, rn, rm
1316
1317 This function handles both an immediate and register sub.
1318
1319 RD is the destination register.
1320 RN is the input register.
1321    OPERAND is the immediate or register to subtract from RN.  */
1322
1323 static int
1324 emit_sub (uint32_t *buf, struct aarch64_register rd,
1325 struct aarch64_register rn, struct aarch64_operand operand)
1326 {
1327 return emit_data_processing (buf, SUB, rd, rn, operand);
1328 }
1329
1330 /* Write a MOV instruction into *BUF.
1331
1332 MOV rd, #imm
1333 MOV rd, rm
1334
1335 This function handles both a wide immediate move and a register move,
1336 with the condition that the source register is not xzr. xzr and the
1337 stack pointer share the same encoding and this function only supports
1338 the stack pointer.
1339
1340 RD is the destination register.
1341 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1342 OPERAND_REGISTER. */
1343
1344 static int
1345 emit_mov (uint32_t *buf, struct aarch64_register rd,
1346 struct aarch64_operand operand)
1347 {
1348 if (operand.type == OPERAND_IMMEDIATE)
1349 {
1350 uint32_t size = ENCODE (rd.is64, 1, 31);
1351 /* Do not shift the immediate. */
1352 uint32_t shift = ENCODE (0, 2, 21);
1353
1354 return aarch64_emit_insn (buf, MOV | size | shift
1355 | ENCODE (operand.imm, 16, 5)
1356 | ENCODE (rd.num, 5, 0));
1357 }
1358 else
1359 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1360 }
1361
1362 /* Write a MOVK instruction into *BUF.
1363
1364 MOVK rd, #imm, lsl #shift
1365
1366 RD is the destination register.
1367 IMM is the immediate.
1368 SHIFT is the logical shift left to apply to IMM. */
1369
1370 static int
1371 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1372 unsigned shift)
1373 {
1374 uint32_t size = ENCODE (rd.is64, 1, 31);
1375
1376 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1377 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1378 }
1379
1380 /* Write instructions into *BUF in order to move ADDR into a register.
1381 ADDR can be a 64-bit value.
1382
1383 This function will emit a series of MOV and MOVK instructions, such as:
1384
1385 MOV xd, #(addr)
1386 MOVK xd, #(addr >> 16), lsl #16
1387 MOVK xd, #(addr >> 32), lsl #32
1388 MOVK xd, #(addr >> 48), lsl #48 */
1389
1390 static int
1391 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1392 {
1393 uint32_t *p = buf;
1394
1395   /* The MOV (wide immediate) instruction clears the top bits of the
1396 register. */
1397 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1398
1399 if ((addr >> 16) != 0)
1400 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1401 else
1402 return p - buf;
1403
1404 if ((addr >> 32) != 0)
1405 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1406 else
1407 return p - buf;
1408
1409 if ((addr >> 48) != 0)
1410 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1411
1412 return p - buf;
1413 }
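
/* For example, loading the (illustrative) address 0x0000007ffff01234 takes
   three instructions; the final MOVK is skipped because bits 48..63 are
   zero:

     MOV  xd, #0x1234
     MOVK xd, #0xfff0, lsl #16
     MOVK xd, #0x7f, lsl #32  */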
1414
1415 /* Write a SUBS instruction into *BUF.
1416
1417 SUBS rd, rn, rm
1418
1419    This instruction updates the condition flags.
1420
1421 RD is the destination register.
1422 RN and RM are the source registers. */
1423
1424 static int
1425 emit_subs (uint32_t *buf, struct aarch64_register rd,
1426 struct aarch64_register rn, struct aarch64_operand operand)
1427 {
1428 return emit_data_processing (buf, SUBS, rd, rn, operand);
1429 }
1430
1431 /* Write a CMP instruction into *BUF.
1432
1433 CMP rn, rm
1434
1435 This instruction is an alias of SUBS xzr, rn, rm.
1436
1437 RN and RM are the registers to compare. */
1438
1439 static int
1440 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1441 struct aarch64_operand operand)
1442 {
1443 return emit_subs (buf, xzr, rn, operand);
1444 }
1445
1446 /* Write an AND instruction into *BUF.
1447
1448 AND rd, rn, rm
1449
1450 RD is the destination register.
1451 RN and RM are the source registers. */
1452
1453 static int
1454 emit_and (uint32_t *buf, struct aarch64_register rd,
1455 struct aarch64_register rn, struct aarch64_register rm)
1456 {
1457 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1458 }
1459
1460 /* Write an ORR instruction into *BUF.
1461
1462 ORR rd, rn, rm
1463
1464 RD is the destination register.
1465 RN and RM are the source registers. */
1466
1467 static int
1468 emit_orr (uint32_t *buf, struct aarch64_register rd,
1469 struct aarch64_register rn, struct aarch64_register rm)
1470 {
1471 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1472 }
1473
1474 /* Write an ORN instruction into *BUF.
1475
1476 ORN rd, rn, rm
1477
1478 RD is the destination register.
1479 RN and RM are the source registers. */
1480
1481 static int
1482 emit_orn (uint32_t *buf, struct aarch64_register rd,
1483 struct aarch64_register rn, struct aarch64_register rm)
1484 {
1485 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1486 }
1487
1488 /* Write an EOR instruction into *BUF.
1489
1490 EOR rd, rn, rm
1491
1492 RD is the destination register.
1493 RN and RM are the source registers. */
1494
1495 static int
1496 emit_eor (uint32_t *buf, struct aarch64_register rd,
1497 struct aarch64_register rn, struct aarch64_register rm)
1498 {
1499 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1500 }
1501
1502 /* Write a MVN instruction into *BUF.
1503
1504 MVN rd, rm
1505
1506 This is an alias for ORN rd, xzr, rm.
1507
1508 RD is the destination register.
1509 RM is the source register. */
1510
1511 static int
1512 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1513 struct aarch64_register rm)
1514 {
1515 return emit_orn (buf, rd, xzr, rm);
1516 }
1517
1518 /* Write a LSLV instruction into *BUF.
1519
1520 LSLV rd, rn, rm
1521
1522 RD is the destination register.
1523 RN and RM are the source registers. */
1524
1525 static int
1526 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1527 struct aarch64_register rn, struct aarch64_register rm)
1528 {
1529 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1530 }
1531
1532 /* Write a LSRV instruction into *BUF.
1533
1534 LSRV rd, rn, rm
1535
1536 RD is the destination register.
1537 RN and RM are the source registers. */
1538
1539 static int
1540 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1541 struct aarch64_register rn, struct aarch64_register rm)
1542 {
1543 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1544 }
1545
1546 /* Write an ASRV instruction into *BUF.
1547
1548 ASRV rd, rn, rm
1549
1550 RD is the destination register.
1551 RN and RM are the source registers. */
1552
1553 static int
1554 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1555 struct aarch64_register rn, struct aarch64_register rm)
1556 {
1557 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1558 }
1559
1560 /* Write a MUL instruction into *BUF.
1561
1562 MUL rd, rn, rm
1563
1564 RD is the destination register.
1565 RN and RM are the source registers. */
1566
1567 static int
1568 emit_mul (uint32_t *buf, struct aarch64_register rd,
1569 struct aarch64_register rn, struct aarch64_register rm)
1570 {
1571 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1572 }
1573
1574 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1575
1576 MRS xt, system_reg
1577
1578 RT is the destination register.
1579    SYSTEM_REG is the special purpose register to read.  */
1580
1581 static int
1582 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1583 enum aarch64_system_control_registers system_reg)
1584 {
1585 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1586 | ENCODE (rt.num, 5, 0));
1587 }
1588
1589 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1590
1591 MSR system_reg, xt
1592
1593    SYSTEM_REG is the special purpose register to write.
1594 RT is the input register. */
1595
1596 static int
1597 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1598 struct aarch64_register rt)
1599 {
1600 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1601 | ENCODE (rt.num, 5, 0));
1602 }
1603
1604 /* Write a SEVL instruction into *BUF.
1605
1606    This hint instruction signals an event locally, priming a following WFE.  */
1607
1608 static int
1609 emit_sevl (uint32_t *buf)
1610 {
1611 return aarch64_emit_insn (buf, SEVL);
1612 }
1613
1614 /* Write a WFE instruction into *BUF.
1615
1616 This is a hint instruction telling the hardware to wait for an event. */
1617
1618 static int
1619 emit_wfe (uint32_t *buf)
1620 {
1621 return aarch64_emit_insn (buf, WFE);
1622 }
1623
1624 /* Write a SBFM instruction into *BUF.
1625
1626 SBFM rd, rn, #immr, #imms
1627
1628 This instruction moves the bits from #immr to #imms into the
1629 destination, sign extending the result.
1630
1631 RD is the destination register.
1632 RN is the source register.
1633 IMMR is the bit number to start at (least significant bit).
1634 IMMS is the bit number to stop at (most significant bit). */
1635
1636 static int
1637 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1638 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1639 {
1640 uint32_t size = ENCODE (rd.is64, 1, 31);
1641 uint32_t n = ENCODE (rd.is64, 1, 22);
1642
1643 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1644 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1645 | ENCODE (rd.num, 5, 0));
1646 }
1647
1648 /* Write a SBFX instruction into *BUF.
1649
1650 SBFX rd, rn, #lsb, #width
1651
1652 This instruction moves #width bits from #lsb into the destination, sign
1653 extending the result. This is an alias for:
1654
1655 SBFM rd, rn, #lsb, #(lsb + width - 1)
1656
1657 RD is the destination register.
1658 RN is the source register.
1659 LSB is the bit number to start at (least significant bit).
1660 WIDTH is the number of bits to move. */
1661
1662 static int
1663 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1664 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1665 {
1666 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1667 }
1668
1669 /* Write a UBFM instruction into *BUF.
1670
1671 UBFM rd, rn, #immr, #imms
1672
1673 This instruction moves the bits from #immr to #imms into the
1674 destination, extending the result with zeros.
1675
1676 RD is the destination register.
1677 RN is the source register.
1678 IMMR is the bit number to start at (least significant bit).
1679 IMMS is the bit number to stop at (most significant bit). */
1680
1681 static int
1682 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1683 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1684 {
1685 uint32_t size = ENCODE (rd.is64, 1, 31);
1686 uint32_t n = ENCODE (rd.is64, 1, 22);
1687
1688 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1689 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1690 | ENCODE (rd.num, 5, 0));
1691 }
1692
1693 /* Write a UBFX instruction into *BUF.
1694
1695 UBFX rd, rn, #lsb, #width
1696
1697 This instruction moves #width bits from #lsb into the destination,
1698 extending the result with zeros. This is an alias for:
1699
1700 UBFM rd, rn, #lsb, #(lsb + width - 1)
1701
1702 RD is the destination register.
1703 RN is the source register.
1704 LSB is the bit number to start at (least significant bit).
1705 WIDTH is the number of bits to move. */
1706
1707 static int
1708 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1709 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1710 {
1711 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1712 }
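
/* For example, emit_ubfx (p, w0, w2, 0, 8) extracts the low byte of w2
   into w0, i.e. it emits UBFM w0, w2, #0, #7 (registers illustrative).  */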
1713
1714 /* Write a CSINC instruction into *BUF.
1715
1716 CSINC rd, rn, rm, cond
1717
1718 This instruction conditionally increments rn or rm and places the result
1719    in rd.  rn is chosen if the condition is true.
1720
1721 RD is the destination register.
1722 RN and RM are the source registers.
1723 COND is the encoded condition. */
1724
1725 static int
1726 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1727 struct aarch64_register rn, struct aarch64_register rm,
1728 unsigned cond)
1729 {
1730 uint32_t size = ENCODE (rd.is64, 1, 31);
1731
1732 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1733 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1734 | ENCODE (rd.num, 5, 0));
1735 }
1736
1737 /* Write a CSET instruction into *BUF.
1738
1739 CSET rd, cond
1740
1741    This instruction conditionally writes 1 or 0 to the destination register.
1742 1 is written if the condition is true. This is an alias for:
1743
1744 CSINC rd, xzr, xzr, !cond
1745
1746 Note that the condition needs to be inverted.
1747
1748    RD is the destination register.
1749    COND is the encoded condition.  */
1751
1752 static int
1753 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1754 {
1755 /* The least significant bit of the condition needs toggling in order to
1756 invert it. */
1757 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1758 }
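
/* For example, emit_cset (p, x0, NE) emits CSINC x0, xzr, xzr, EQ, which
   leaves 1 in x0 when the Z flag is clear and 0 otherwise.  */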
1759
1760 /* Write LEN instructions from BUF into the inferior memory at *TO.
1761
1762    Note that instructions are always little-endian on AArch64, unlike data.  */
1763
1764 static void
1765 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1766 {
1767 size_t byte_len = len * sizeof (uint32_t);
1768 #if (__BYTE_ORDER == __BIG_ENDIAN)
1769 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1770 size_t i;
1771
1772 for (i = 0; i < len; i++)
1773 le_buf[i] = htole32 (buf[i]);
1774
1775 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1776
1777 xfree (le_buf);
1778 #else
1779 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1780 #endif
1781
1782 *to += byte_len;
1783 }
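
/* Typical usage (a hedged sketch; the buffer size and the destination
   JUMP_PAD_ADDR are purely illustrative): build up instructions in a local
   buffer with the emit_* helpers, then flush them to the inferior in one go:

     uint32_t buf[16];
     uint32_t *p = buf;
     CORE_ADDR to = jump_pad_addr;      hypothetical destination

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&to, p - buf, buf);  */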
1784
1785 /* Sub-class of struct aarch64_insn_data, storing information about
1786    instruction relocation for fast tracepoints.  The visitor can
1787    relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1788    the relocated instructions in the buffer pointed to by INSN_PTR.  */
1789
1790 struct aarch64_insn_relocation_data
1791 {
1792 struct aarch64_insn_data base;
1793
1794 /* The new address the instruction is relocated to. */
1795 CORE_ADDR new_addr;
1796 /* Pointer to the buffer of relocated instruction(s). */
1797 uint32_t *insn_ptr;
1798 };
1799
1800 /* Implementation of aarch64_insn_visitor method "b". */
1801
1802 static void
1803 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1804 struct aarch64_insn_data *data)
1805 {
1806 struct aarch64_insn_relocation_data *insn_reloc
1807 = (struct aarch64_insn_relocation_data *) data;
1808 int64_t new_offset
1809 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1810
1811 if (can_encode_int32 (new_offset, 28))
1812 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1813 }
1814
1815 /* Implementation of aarch64_insn_visitor method "b_cond". */
1816
1817 static void
1818 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1819 struct aarch64_insn_data *data)
1820 {
1821 struct aarch64_insn_relocation_data *insn_reloc
1822 = (struct aarch64_insn_relocation_data *) data;
1823 int64_t new_offset
1824 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1825
1826 if (can_encode_int32 (new_offset, 21))
1827 {
1828 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1829 new_offset);
1830 }
1831 else if (can_encode_int32 (new_offset, 28))
1832 {
1833 /* The offset is out of range for a conditional branch
1834 	 instruction but not for an unconditional branch.  We can use
1835 the following instructions instead:
1836
1837 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1838 B NOT_TAKEN ; Else jump over TAKEN and continue.
1839 TAKEN:
1840 B #(offset - 8)
1841 NOT_TAKEN:
1842
1843 */
1844
1845 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1846 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1847 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1848 }
1849 }
1850
1851 /* Implementation of aarch64_insn_visitor method "cb". */
1852
1853 static void
1854 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1855 const unsigned rn, int is64,
1856 struct aarch64_insn_data *data)
1857 {
1858 struct aarch64_insn_relocation_data *insn_reloc
1859 = (struct aarch64_insn_relocation_data *) data;
1860 int64_t new_offset
1861 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1862
1863 if (can_encode_int32 (new_offset, 21))
1864 {
1865 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1866 aarch64_register (rn, is64), new_offset);
1867 }
1868 else if (can_encode_int32 (new_offset, 28))
1869 {
1870 /* The offset is out of range for a compare and branch
1871 	 instruction but not for an unconditional branch.  We can use
1872 the following instructions instead:
1873
1874 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1875 B NOT_TAKEN ; Else jump over TAKEN and continue.
1876 TAKEN:
1877 B #(offset - 8)
1878 NOT_TAKEN:
1879
1880 */
1881 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1882 aarch64_register (rn, is64), 8);
1883 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1884 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1885 }
1886 }
1887
1888 /* Implementation of aarch64_insn_visitor method "tb". */
1889
1890 static void
1891 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1892 const unsigned rt, unsigned bit,
1893 struct aarch64_insn_data *data)
1894 {
1895 struct aarch64_insn_relocation_data *insn_reloc
1896 = (struct aarch64_insn_relocation_data *) data;
1897 int64_t new_offset
1898 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1899
1900 if (can_encode_int32 (new_offset, 16))
1901 {
1902 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1903 aarch64_register (rt, 1), new_offset);
1904 }
1905 else if (can_encode_int32 (new_offset, 28))
1906 {
1907 /* The offset is out of range for a test bit and branch
1908 	 instruction but not for an unconditional branch.  We can use
1909 the following instructions instead:
1910
1911 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1912 B NOT_TAKEN ; Else jump over TAKEN and continue.
1913 TAKEN:
1914 B #(offset - 8)
1915 NOT_TAKEN:
1916
1917 */
1918 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1919 aarch64_register (rt, 1), 8);
1920 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1921 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1922 new_offset - 8);
1923 }
1924 }
1925
1926 /* Implementation of aarch64_insn_visitor method "adr". */
1927
1928 static void
1929 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1930 const int is_adrp,
1931 struct aarch64_insn_data *data)
1932 {
1933 struct aarch64_insn_relocation_data *insn_reloc
1934 = (struct aarch64_insn_relocation_data *) data;
1935 /* We know exactly the address the ADR{P,} instruction will compute.
1936 We can just write it to the destination register. */
1937 CORE_ADDR address = data->insn_addr + offset;
1938
1939 if (is_adrp)
1940 {
1941 /* Clear the lower 12 bits of the address to get the 4K page. */
1942 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1943 aarch64_register (rd, 1),
1944 address & ~0xfff);
1945 }
1946 else
1947 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1948 aarch64_register (rd, 1), address);
1949 }
1950
1951 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1952
1953 static void
1954 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1955 const unsigned rt, const int is64,
1956 struct aarch64_insn_data *data)
1957 {
1958 struct aarch64_insn_relocation_data *insn_reloc
1959 = (struct aarch64_insn_relocation_data *) data;
1960 CORE_ADDR address = data->insn_addr + offset;
1961
1962 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1963 aarch64_register (rt, 1), address);
1964
1965 /* We know exactly what address to load from, and what register we
1966 can use:
1967
1968 MOV xd, #(oldloc + offset)
1969 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1970 ...
1971
1972 LDR xd, [xd] ; or LDRSW xd, [xd]
1973
1974 */
1975
1976 if (is_sw)
1977 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1978 aarch64_register (rt, 1),
1979 aarch64_register (rt, 1),
1980 offset_memory_operand (0));
1981 else
1982 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1983 aarch64_register (rt, is64),
1984 aarch64_register (rt, 1),
1985 offset_memory_operand (0));
1986 }
1987
1988 /* Implementation of aarch64_insn_visitor method "others". */
1989
1990 static void
1991 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1992 struct aarch64_insn_data *data)
1993 {
1994 struct aarch64_insn_relocation_data *insn_reloc
1995 = (struct aarch64_insn_relocation_data *) data;
1996
1997 /* The instruction is not PC relative. Just re-emit it at the new
1998 location. */
1999 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
2000 }
2001
2002 static const struct aarch64_insn_visitor visitor =
2003 {
2004 aarch64_ftrace_insn_reloc_b,
2005 aarch64_ftrace_insn_reloc_b_cond,
2006 aarch64_ftrace_insn_reloc_cb,
2007 aarch64_ftrace_insn_reloc_tb,
2008 aarch64_ftrace_insn_reloc_adr,
2009 aarch64_ftrace_insn_reloc_ldr_literal,
2010 aarch64_ftrace_insn_reloc_others,
2011 };
2012
2013 bool
2014 aarch64_target::supports_fast_tracepoints ()
2015 {
2016 return true;
2017 }
2018
2019 /* Implementation of target ops method
2020 "install_fast_tracepoint_jump_pad". */
2021
2022 int
2023 aarch64_target::install_fast_tracepoint_jump_pad
2024 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
2025 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
2026 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
2027 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
2028 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
2029 char *err)
2030 {
2031 uint32_t buf[256];
2032 uint32_t *p = buf;
2033 int64_t offset;
2034 int i;
2035 uint32_t insn;
2036 CORE_ADDR buildaddr = *jump_entry;
2037 struct aarch64_insn_relocation_data insn_data;
2038
2039 /* We need to save the current state on the stack both to restore it
2040 later and to collect register values when the tracepoint is hit.
2041
2042 The saved registers are pushed in a layout that needs to be in sync
2043 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2044 the supply_fast_tracepoint_registers function will fill in the
2045 register cache from a pointer to saved registers on the stack we build
2046 here.
2047
2048 For simplicity, we set the size of each cell on the stack to 16 bytes.
2049 This way one cell can hold any register type, from system registers
2050 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
2051 has to be 16-byte aligned anyway.
2052
2053 Note that the CPSR register does not exist on AArch64. Instead we
2054 can access system bits describing the process state with the
2055 MRS/MSR instructions, namely the condition flags. We save them as
2056 if they are part of a CPSR register because that's how GDB
2057 interprets these system bits. At the moment, only the condition
2058 flags are saved in CPSR (NZCV).
2059
2060 Stack layout, each cell is 16 bytes (descending):
2061
2062 High *-------- SIMD&FP registers from 31 down to 0. --------*
2063 | q31 |
2064 . .
2065 . . 32 cells
2066 . .
2067 | q0 |
2068 *---- General purpose registers from 30 down to 0. ----*
2069 | x30 |
2070 . .
2071 . . 31 cells
2072 . .
2073 | x0 |
2074 *------------- Special purpose registers. -------------*
2075 | SP |
2076 | PC |
2077 | CPSR (NZCV) | 5 cells
2078 | FPSR |
2079 | FPCR | <- SP + 16
2080 *------------- collecting_t object --------------------*
2081 | TPIDR_EL0 | struct tracepoint * |
2082 Low *------------------------------------------------------*
2083
2084 After this stack is set up, we issue a call to the collector, passing
2085 it the saved registers at (SP + 16). */
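  /* In total, the code below pushes (32 + 31 + 5 + 1) * 16 = 1104 bytes:
     32 SIMD&FP cells, 31 general purpose cells, 5 special purpose cells
     and one cell for the collecting_t object.  */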
2086
2087 /* Push SIMD&FP registers on the stack:
2088
2089 SUB sp, sp, #(32 * 16)
2090
2091 STP q30, q31, [sp, #(30 * 16)]
2092 ...
2093 STP q0, q1, [sp]
2094
2095 */
2096 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2097 for (i = 30; i >= 0; i -= 2)
2098 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2099
2100 /* Push general purpose registers on the stack. Note that we do not need
2101 to push x31 as it represents the xzr register and not the stack
2102 pointer in a STR instruction.
2103
2104 SUB sp, sp, #(31 * 16)
2105
2106 STR x30, [sp, #(30 * 16)]
2107 ...
2108 STR x0, [sp]
2109
2110 */
2111 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2112 for (i = 30; i >= 0; i -= 1)
2113 p += emit_str (p, aarch64_register (i, 1), sp,
2114 offset_memory_operand (i * 16));
2115
2116 /* Make space for 5 more cells.
2117
2118 SUB sp, sp, #(5 * 16)
2119
2120 */
2121 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2122
2123
2124 /* Save SP:
2125
2126 ADD x4, sp, #((32 + 31 + 5) * 16)
2127 STR x4, [sp, #(4 * 16)]
2128
2129 */
2130 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2131 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
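  /* x4 now holds SP as it was before the (32 + 31 + 5) * 16 = 1088 bytes
     pushed above, i.e. the inferior's original stack pointer.  */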
2132
2133 /* Save PC (tracepoint address):
2134
2135 MOV x3, #(tpaddr)
2136 ...
2137
2138 STR x3, [sp, #(3 * 16)]
2139
2140 */
2141
2142 p += emit_mov_addr (p, x3, tpaddr);
2143 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2144
2145 /* Save CPSR (NZCV), FPSR and FPCR:
2146
2147 MRS x2, nzcv
2148 MRS x1, fpsr
2149 MRS x0, fpcr
2150
2151 STR x2, [sp, #(2 * 16)]
2152 STR x1, [sp, #(1 * 16)]
2153 STR x0, [sp, #(0 * 16)]
2154
2155 */
2156 p += emit_mrs (p, x2, NZCV);
2157 p += emit_mrs (p, x1, FPSR);
2158 p += emit_mrs (p, x0, FPCR);
2159 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2160 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2161 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2162
2163 /* Push the collecting_t object. It consists of the address of the
2164 tracepoint and an ID for the current thread. We get the latter by
2165 reading the tpidr_el0 system register. It corresponds to the
2166 NT_ARM_TLS register accessible with ptrace.
2167
2168 MOV x0, #(tpoint)
2169 ...
2170
2171 MRS x1, tpidr_el0
2172
2173 STP x0, x1, [sp, #-16]!
2174
2175 */
2176
2177 p += emit_mov_addr (p, x0, tpoint);
2178 p += emit_mrs (p, x1, TPIDR_EL0);
2179 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2180
2181 /* Spin-lock:
2182
2183 The shared memory for the lock is at lockaddr. It will hold zero
2184 if no-one is holding the lock, otherwise it contains the address of
2185 the collecting_t object on the stack of the thread which acquired it.
2186
2187 At this stage, the stack pointer points to this thread's collecting_t
2188 object.
2189
2190 We use the following registers:
2191 - x0: Address of the lock.
2192 - x1: Pointer to collecting_t object.
2193 - x2: Scratch register.
2194
2195 MOV x0, #(lockaddr)
2196 ...
2197 MOV x1, sp
2198
2199 ; Trigger an event local to this core, so that the following WFE
2200 ; instruction returns immediately instead of waiting.
2201 SEVL
2202 again:
2203 ; Wait for an event. The event is triggered by either the SEVL
2204 ; or STLR instructions (store release).
2205 WFE
2206
2207 ; Atomically read at lockaddr. This marks the memory location as
2208 ; exclusive. This instruction also has memory constraints which
2209 ; make sure all previous data reads and writes are done before
2210 ; executing it.
2211 LDAXR x2, [x0]
2212
2213 ; Try again if another thread holds the lock.
2214 CBNZ x2, again
2215
2216 ; We can lock it! Write the address of the collecting_t object.
2217 ; This instruction will fail if the memory location is not marked
2218 ; as exclusive anymore. If it succeeds, it will remove the
2219 ; exclusive mark on the memory location. This way, if another
2220 ; thread executes this instruction before us, we will fail and try
2221 ; all over again.
2222 STXR w2, x1, [x0]
2223 CBNZ w2, again
2224
2225 */
2226
2227 p += emit_mov_addr (p, x0, lockaddr);
2228 p += emit_mov (p, x1, register_operand (sp));
2229
2230 p += emit_sevl (p);
2231 p += emit_wfe (p);
2232 p += emit_ldaxr (p, x2, x0);
2233 p += emit_cb (p, 1, w2, -2 * 4);
2234 p += emit_stxr (p, w2, x1, x0);
2235 p += emit_cb (p, 1, x2, -4 * 4);
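  /* The byte offsets -2 * 4 and -4 * 4 branch back two and four
     instructions respectively, so both CBNZs land on the WFE ("again" in
     the listing above).  */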
2236
2237 /* Call collector (struct tracepoint *, unsigned char *):
2238
2239 MOV x0, #(tpoint)
2240 ...
2241
2242 ; Saved registers start after the collecting_t object.
2243 ADD x1, sp, #16
2244
2245 ; We use an intra-procedure-call scratch register.
2246 MOV ip0, #(collector)
2247 ...
2248
2249 ; And call back to C!
2250 BLR ip0
2251
2252 */
2253
2254 p += emit_mov_addr (p, x0, tpoint);
2255 p += emit_add (p, x1, sp, immediate_operand (16));
2256
2257 p += emit_mov_addr (p, ip0, collector);
2258 p += emit_blr (p, ip0);
2259
2260 /* Release the lock.
2261
2262 MOV x0, #(lockaddr)
2263 ...
2264
2265 ; This instruction is a normal store with memory ordering
2266 ; constraints. Thanks to this we do not have to put a data
2267 ; barrier instruction to make sure all data reads and writes are done
2268 ; before this instruction is executed. Furthermore, this instruction
2269 ; will trigger an event, letting other threads know they can grab
2270 ; the lock.
2271 STLR xzr, [x0]
2272
2273 */
2274 p += emit_mov_addr (p, x0, lockaddr);
2275 p += emit_stlr (p, xzr, x0);
2276
2277 /* Free collecting_t object:
2278
2279 ADD sp, sp, #16
2280
2281 */
2282 p += emit_add (p, sp, sp, immediate_operand (16));
2283
2284 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2285 registers from the stack.
2286
2287 LDR x2, [sp, #(2 * 16)]
2288 LDR x1, [sp, #(1 * 16)]
2289 LDR x0, [sp, #(0 * 16)]
2290
2291 MSR NZCV, x2
2292 MSR FPSR, x1
2293 MSR FPCR, x0
2294
2295 ADD sp, sp, #(5 * 16)
2296
2297 */
2298 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2299 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2300 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2301 p += emit_msr (p, NZCV, x2);
2302 p += emit_msr (p, FPSR, x1);
2303 p += emit_msr (p, FPCR, x0);
2304
2305 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2306
2307 /* Pop general purpose registers:
2308
2309 LDR x0, [sp]
2310 ...
2311 LDR x30, [sp, #(30 * 16)]
2312
2313 ADD sp, sp, #(31 * 16)
2314
2315 */
2316 for (i = 0; i <= 30; i += 1)
2317 p += emit_ldr (p, aarch64_register (i, 1), sp,
2318 offset_memory_operand (i * 16));
2319 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2320
2321 /* Pop SIMD&FP registers:
2322
2323 LDP q0, q1, [sp]
2324 ...
2325 LDP q30, q31, [sp, #(30 * 16)]
2326
2327 ADD sp, sp, #(32 * 16)
2328
2329 */
2330 for (i = 0; i <= 30; i += 2)
2331 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2332 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2333
2334 /* Write the code into the inferior memory. */
2335 append_insns (&buildaddr, p - buf, buf);
2336
2337 /* Now emit the relocated instruction. */
2338 *adjusted_insn_addr = buildaddr;
2339 target_read_uint32 (tpaddr, &insn);
2340
2341 insn_data.base.insn_addr = tpaddr;
2342 insn_data.new_addr = buildaddr;
2343 insn_data.insn_ptr = buf;
2344
2345 aarch64_relocate_instruction (insn, &visitor,
2346 (struct aarch64_insn_data *) &insn_data);
2347
2348 /* We may not have been able to relocate the instruction. */
2349 if (insn_data.insn_ptr == buf)
2350 {
2351 sprintf (err,
2352 "E.Could not relocate instruction from %s to %s.",
2353 core_addr_to_string_nz (tpaddr),
2354 core_addr_to_string_nz (buildaddr));
2355 return 1;
2356 }
2357 else
2358 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2359 *adjusted_insn_addr_end = buildaddr;
2360
2361 /* Go back to the start of the buffer. */
2362 p = buf;
2363
2364 /* Emit a branch back from the jump pad. */
2365 offset = (tpaddr + orig_size - buildaddr);
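  /* An unconditional B has a 26-bit immediate scaled by 4, so the branch
     back to the original code must be within roughly +/-128 MiB.  */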
2366 if (!can_encode_int32 (offset, 28))
2367 {
2368 sprintf (err,
2369 "E.Jump back from jump pad too far from tracepoint "
2370 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2371 offset);
2372 return 1;
2373 }
2374
2375 p += emit_b (p, 0, offset);
2376 append_insns (&buildaddr, p - buf, buf);
2377
2378 /* Give the caller a branch instruction into the jump pad. */
2379 offset = (*jump_entry - tpaddr);
2380 if (!can_encode_int32 (offset, 28))
2381 {
2382 sprintf (err,
2383 "E.Jump pad too far from tracepoint "
2384 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2385 offset);
2386 return 1;
2387 }
2388
2389 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2390 *jjump_pad_insn_size = 4;
2391
2392 /* Return the end address of our pad. */
2393 *jump_entry = buildaddr;
2394
2395 return 0;
2396 }
2397
2398 /* Helper function writing LEN instructions from START into
2399 current_insn_ptr. */
2400
2401 static void
2402 emit_ops_insns (const uint32_t *start, int len)
2403 {
2404 CORE_ADDR buildaddr = current_insn_ptr;
2405
2406 if (debug_threads)
2407 debug_printf ("Adding %d instructions at %s\n",
2408 len, paddress (buildaddr));
2409
2410 append_insns (&buildaddr, len, start);
2411 current_insn_ptr = buildaddr;
2412 }
2413
2414 /* Pop a register from the stack. Each stack slot is 16 bytes so that SP stays 16-byte aligned. */
2415
2416 static int
2417 emit_pop (uint32_t *buf, struct aarch64_register rt)
2418 {
2419 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2420 }
2421
2422 /* Push a register on the stack. */
2423
2424 static int
2425 emit_push (uint32_t *buf, struct aarch64_register rt)
2426 {
2427 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2428 }
2429
2430 /* Implementation of emit_ops method "emit_prologue". */
2431
2432 static void
2433 aarch64_emit_prologue (void)
2434 {
2435 uint32_t buf[16];
2436 uint32_t *p = buf;
2437
2438 /* This function emits a prologue for the following function prototype:
2439
2440 enum eval_result_type f (unsigned char *regs,
2441 ULONGEST *value);
2442
2443 The first argument is a buffer of raw registers. The second
2444 argument points to where the result of evaluating the expression
2445 will be stored; it is set to whatever is on top of the stack at
2446 the end.
2447
2448 The stack set up by the prologue is as such:
2449
2450 High *------------------------------------------------------*
2451 | LR |
2452 | FP | <- FP
2453 | x1 (ULONGEST *value) |
2454 | x0 (unsigned char *regs) |
2455 Low *------------------------------------------------------*
2456
2457 As we are implementing a stack machine, each opcode can expand the
2458 stack so we never know how far we are from the data saved by this
2459 prologue. In order to be able to refer to value and regs later, we save
2460 the current stack pointer in the frame pointer. This way, it is not
2461 clobbered when calling C functions.
2462
2463 Finally, throughout every operation, we are using register x0 as the
2464 top of the stack, and x1 as a scratch register. */
2465
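  /* The pre-indexed STP allocates the whole 32-byte frame and fills its
     low half with x0 and x1; the two STRs below fill FP and LR into the
     high half, matching the layout drawn above.  */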
2466 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2467 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2468 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2469
2470 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2471
2472
2473 emit_ops_insns (buf, p - buf);
2474 }
2475
2476 /* Implementation of emit_ops method "emit_epilogue". */
2477
2478 static void
2479 aarch64_emit_epilogue (void)
2480 {
2481 uint32_t buf[16];
2482 uint32_t *p = buf;
2483
2484 /* Store the result of the expression (x0) in *value. */
2485 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2486 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2487 p += emit_str (p, x0, x1, offset_memory_operand (0));
2488
2489 /* Restore the previous state. */
2490 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2491 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2492
2493 /* Return expr_eval_no_error. */
2494 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2495 p += emit_ret (p, lr);
2496
2497 emit_ops_insns (buf, p - buf);
2498 }
2499
2500 /* Implementation of emit_ops method "emit_add". */
2501
2502 static void
2503 aarch64_emit_add (void)
2504 {
2505 uint32_t buf[16];
2506 uint32_t *p = buf;
2507
2508 p += emit_pop (p, x1);
2509 p += emit_add (p, x0, x1, register_operand (x0));
2510
2511 emit_ops_insns (buf, p - buf);
2512 }
2513
2514 /* Implementation of emit_ops method "emit_sub". */
2515
2516 static void
2517 aarch64_emit_sub (void)
2518 {
2519 uint32_t buf[16];
2520 uint32_t *p = buf;
2521
2522 p += emit_pop (p, x1);
2523 p += emit_sub (p, x0, x1, register_operand (x0));
2524
2525 emit_ops_insns (buf, p - buf);
2526 }
2527
2528 /* Implementation of emit_ops method "emit_mul". */
2529
2530 static void
2531 aarch64_emit_mul (void)
2532 {
2533 uint32_t buf[16];
2534 uint32_t *p = buf;
2535
2536 p += emit_pop (p, x1);
2537 p += emit_mul (p, x0, x1, x0);
2538
2539 emit_ops_insns (buf, p - buf);
2540 }
2541
2542 /* Implementation of emit_ops method "emit_lsh". */
2543
2544 static void
2545 aarch64_emit_lsh (void)
2546 {
2547 uint32_t buf[16];
2548 uint32_t *p = buf;
2549
2550 p += emit_pop (p, x1);
2551 p += emit_lslv (p, x0, x1, x0);
2552
2553 emit_ops_insns (buf, p - buf);
2554 }
2555
2556 /* Implementation of emit_ops method "emit_rsh_signed". */
2557
2558 static void
2559 aarch64_emit_rsh_signed (void)
2560 {
2561 uint32_t buf[16];
2562 uint32_t *p = buf;
2563
2564 p += emit_pop (p, x1);
2565 p += emit_asrv (p, x0, x1, x0);
2566
2567 emit_ops_insns (buf, p - buf);
2568 }
2569
2570 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2571
2572 static void
2573 aarch64_emit_rsh_unsigned (void)
2574 {
2575 uint32_t buf[16];
2576 uint32_t *p = buf;
2577
2578 p += emit_pop (p, x1);
2579 p += emit_lsrv (p, x0, x1, x0);
2580
2581 emit_ops_insns (buf, p - buf);
2582 }
2583
2584 /* Implementation of emit_ops method "emit_ext". */
2585
2586 static void
2587 aarch64_emit_ext (int arg)
2588 {
2589 uint32_t buf[16];
2590 uint32_t *p = buf;
2591
2592 p += emit_sbfx (p, x0, x0, 0, arg);
2593
2594 emit_ops_insns (buf, p - buf);
2595 }
2596
2597 /* Implementation of emit_ops method "emit_log_not". */
2598
2599 static void
2600 aarch64_emit_log_not (void)
2601 {
2602 uint32_t buf[16];
2603 uint32_t *p = buf;
2604
2605 /* If the top of the stack is 0, replace it with 1. Else replace it with
2606 0. */
2607
2608 p += emit_cmp (p, x0, immediate_operand (0));
2609 p += emit_cset (p, x0, EQ);
2610
2611 emit_ops_insns (buf, p - buf);
2612 }
2613
2614 /* Implementation of emit_ops method "emit_bit_and". */
2615
2616 static void
2617 aarch64_emit_bit_and (void)
2618 {
2619 uint32_t buf[16];
2620 uint32_t *p = buf;
2621
2622 p += emit_pop (p, x1);
2623 p += emit_and (p, x0, x0, x1);
2624
2625 emit_ops_insns (buf, p - buf);
2626 }
2627
2628 /* Implementation of emit_ops method "emit_bit_or". */
2629
2630 static void
2631 aarch64_emit_bit_or (void)
2632 {
2633 uint32_t buf[16];
2634 uint32_t *p = buf;
2635
2636 p += emit_pop (p, x1);
2637 p += emit_orr (p, x0, x0, x1);
2638
2639 emit_ops_insns (buf, p - buf);
2640 }
2641
2642 /* Implementation of emit_ops method "emit_bit_xor". */
2643
2644 static void
2645 aarch64_emit_bit_xor (void)
2646 {
2647 uint32_t buf[16];
2648 uint32_t *p = buf;
2649
2650 p += emit_pop (p, x1);
2651 p += emit_eor (p, x0, x0, x1);
2652
2653 emit_ops_insns (buf, p - buf);
2654 }
2655
2656 /* Implementation of emit_ops method "emit_bit_not". */
2657
2658 static void
2659 aarch64_emit_bit_not (void)
2660 {
2661 uint32_t buf[16];
2662 uint32_t *p = buf;
2663
2664 p += emit_mvn (p, x0, x0);
2665
2666 emit_ops_insns (buf, p - buf);
2667 }
2668
2669 /* Implementation of emit_ops method "emit_equal". */
2670
2671 static void
2672 aarch64_emit_equal (void)
2673 {
2674 uint32_t buf[16];
2675 uint32_t *p = buf;
2676
2677 p += emit_pop (p, x1);
2678 p += emit_cmp (p, x0, register_operand (x1));
2679 p += emit_cset (p, x0, EQ);
2680
2681 emit_ops_insns (buf, p - buf);
2682 }
2683
2684 /* Implementation of emit_ops method "emit_less_signed". */
2685
2686 static void
2687 aarch64_emit_less_signed (void)
2688 {
2689 uint32_t buf[16];
2690 uint32_t *p = buf;
2691
2692 p += emit_pop (p, x1);
2693 p += emit_cmp (p, x1, register_operand (x0));
2694 p += emit_cset (p, x0, LT);
2695
2696 emit_ops_insns (buf, p - buf);
2697 }
2698
2699 /* Implementation of emit_ops method "emit_less_unsigned". */
2700
2701 static void
2702 aarch64_emit_less_unsigned (void)
2703 {
2704 uint32_t buf[16];
2705 uint32_t *p = buf;
2706
2707 p += emit_pop (p, x1);
2708 p += emit_cmp (p, x1, register_operand (x0));
2709 p += emit_cset (p, x0, LO);
2710
2711 emit_ops_insns (buf, p - buf);
2712 }
2713
2714 /* Implementation of emit_ops method "emit_ref". */
2715
2716 static void
2717 aarch64_emit_ref (int size)
2718 {
2719 uint32_t buf[16];
2720 uint32_t *p = buf;
2721
2722 switch (size)
2723 {
2724 case 1:
2725 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2726 break;
2727 case 2:
2728 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2729 break;
2730 case 4:
2731 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2732 break;
2733 case 8:
2734 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2735 break;
2736 default:
2737 /* Unknown size, bail on compilation. */
2738 emit_error = 1;
2739 break;
2740 }
2741
2742 emit_ops_insns (buf, p - buf);
2743 }
2744
2745 /* Implementation of emit_ops method "emit_if_goto". */
2746
2747 static void
2748 aarch64_emit_if_goto (int *offset_p, int *size_p)
2749 {
2750 uint32_t buf[16];
2751 uint32_t *p = buf;
2752
2753 /* The Z flag is set or cleared here. */
2754 p += emit_cmp (p, x0, immediate_operand (0));
2755 /* This instruction must not change the Z flag. */
2756 p += emit_pop (p, x0);
2757 /* Branch over the next instruction if x0 == 0. */
2758 p += emit_bcond (p, EQ, 8);
2759
2760 /* The NOP instruction will be patched with an unconditional branch. */
2761 if (offset_p)
2762 *offset_p = (p - buf) * 4;
2763 if (size_p)
2764 *size_p = 4;
2765 p += emit_nop (p);
2766
2767 emit_ops_insns (buf, p - buf);
2768 }
2769
2770 /* Implementation of emit_ops method "emit_goto". */
2771
2772 static void
2773 aarch64_emit_goto (int *offset_p, int *size_p)
2774 {
2775 uint32_t buf[16];
2776 uint32_t *p = buf;
2777
2778 /* The NOP instruction will be patched with an unconditional branch. */
2779 if (offset_p)
2780 *offset_p = 0;
2781 if (size_p)
2782 *size_p = 4;
2783 p += emit_nop (p);
2784
2785 emit_ops_insns (buf, p - buf);
2786 }
2787
2788 /* Implementation of emit_ops method "write_goto_address". */
2789
2790 static void
2791 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2792 {
2793 uint32_t insn;
2794
2795 emit_b (&insn, 0, to - from);
2796 append_insns (&from, 1, &insn);
2797 }
2798
2799 /* Implementation of emit_ops method "emit_const". */
2800
2801 static void
2802 aarch64_emit_const (LONGEST num)
2803 {
2804 uint32_t buf[16];
2805 uint32_t *p = buf;
2806
2807 p += emit_mov_addr (p, x0, num);
2808
2809 emit_ops_insns (buf, p - buf);
2810 }
2811
2812 /* Implementation of emit_ops method "emit_call". */
2813
2814 static void
2815 aarch64_emit_call (CORE_ADDR fn)
2816 {
2817 uint32_t buf[16];
2818 uint32_t *p = buf;
2819
2820 p += emit_mov_addr (p, ip0, fn);
2821 p += emit_blr (p, ip0);
2822
2823 emit_ops_insns (buf, p - buf);
2824 }
2825
2826 /* Implementation of emit_ops method "emit_reg". */
2827
2828 static void
2829 aarch64_emit_reg (int reg)
2830 {
2831 uint32_t buf[16];
2832 uint32_t *p = buf;
2833
2834 /* Set x0 to unsigned char *regs. */
2835 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2836 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2837 p += emit_mov (p, x1, immediate_operand (reg));
2838
2839 emit_ops_insns (buf, p - buf);
2840
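  /* get_raw_reg_func_addr () yields the address of the in-process agent's
     get_raw_reg function, which reads register number x1 from the raw
     register buffer in x0 and returns its value in x0, the new top of the
     stack.  */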
2841 aarch64_emit_call (get_raw_reg_func_addr ());
2842 }
2843
2844 /* Implementation of emit_ops method "emit_pop". */
2845
2846 static void
2847 aarch64_emit_pop (void)
2848 {
2849 uint32_t buf[16];
2850 uint32_t *p = buf;
2851
2852 p += emit_pop (p, x0);
2853
2854 emit_ops_insns (buf, p - buf);
2855 }
2856
2857 /* Implementation of emit_ops method "emit_stack_flush". */
2858
2859 static void
2860 aarch64_emit_stack_flush (void)
2861 {
2862 uint32_t buf[16];
2863 uint32_t *p = buf;
2864
2865 p += emit_push (p, x0);
2866
2867 emit_ops_insns (buf, p - buf);
2868 }
2869
2870 /* Implementation of emit_ops method "emit_zero_ext". */
2871
2872 static void
2873 aarch64_emit_zero_ext (int arg)
2874 {
2875 uint32_t buf[16];
2876 uint32_t *p = buf;
2877
2878 p += emit_ubfx (p, x0, x0, 0, arg);
2879
2880 emit_ops_insns (buf, p - buf);
2881 }
2882
2883 /* Implementation of emit_ops method "emit_swap". */
2884
2885 static void
2886 aarch64_emit_swap (void)
2887 {
2888 uint32_t buf[16];
2889 uint32_t *p = buf;
2890
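  /* Exchange x0 (the top of the stack) with the value in the topmost
     in-memory stack slot at [sp].  */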
2891 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2892 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2893 p += emit_mov (p, x0, register_operand (x1));
2894
2895 emit_ops_insns (buf, p - buf);
2896 }
2897
2898 /* Implementation of emit_ops method "emit_stack_adjust". */
2899
2900 static void
2901 aarch64_emit_stack_adjust (int n)
2902 {
2903 /* This is not needed with our design. */
2904 uint32_t buf[16];
2905 uint32_t *p = buf;
2906
2907 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2908
2909 emit_ops_insns (buf, p - buf);
2910 }
2911
2912 /* Implementation of emit_ops method "emit_int_call_1". */
2913
2914 static void
2915 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2916 {
2917 uint32_t buf[16];
2918 uint32_t *p = buf;
2919
2920 p += emit_mov (p, x0, immediate_operand (arg1));
2921
2922 emit_ops_insns (buf, p - buf);
2923
2924 aarch64_emit_call (fn);
2925 }
2926
2927 /* Implementation of emit_ops method "emit_void_call_2". */
2928
2929 static void
2930 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2931 {
2932 uint32_t buf[16];
2933 uint32_t *p = buf;
2934
2935 /* Push x0 on the stack. */
2936 aarch64_emit_stack_flush ();
2937
2938 /* Set up arguments for the function call:
2939
2940 x0: arg1
2941 x1: top of the stack
2942
2943 MOV x1, x0
2944 MOV x0, #arg1 */
2945
2946 p += emit_mov (p, x1, register_operand (x0));
2947 p += emit_mov (p, x0, immediate_operand (arg1));
2948
2949 emit_ops_insns (buf, p - buf);
2950
2951 aarch64_emit_call (fn);
2952
2953 /* Restore x0. */
2954 aarch64_emit_pop ();
2955 }
2956
2957 /* Implementation of emit_ops method "emit_eq_goto". */
2958
2959 static void
2960 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2961 {
2962 uint32_t buf[16];
2963 uint32_t *p = buf;
2964
2965 p += emit_pop (p, x1);
2966 p += emit_cmp (p, x1, register_operand (x0));
2967 /* Branch over the next instruction if x0 != x1. */
2968 p += emit_bcond (p, NE, 8);
2969 /* The NOP instruction will be patched with an unconditional branch. */
2970 if (offset_p)
2971 *offset_p = (p - buf) * 4;
2972 if (size_p)
2973 *size_p = 4;
2974 p += emit_nop (p);
2975
2976 emit_ops_insns (buf, p - buf);
2977 }
2978
2979 /* Implementation of emit_ops method "emit_ne_goto". */
2980
2981 static void
2982 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2983 {
2984 uint32_t buf[16];
2985 uint32_t *p = buf;
2986
2987 p += emit_pop (p, x1);
2988 p += emit_cmp (p, x1, register_operand (x0));
2989 /* Branch over the next instruction if x0 == x1. */
2990 p += emit_bcond (p, EQ, 8);
2991 /* The NOP instruction will be patched with an unconditional branch. */
2992 if (offset_p)
2993 *offset_p = (p - buf) * 4;
2994 if (size_p)
2995 *size_p = 4;
2996 p += emit_nop (p);
2997
2998 emit_ops_insns (buf, p - buf);
2999 }
3000
3001 /* Implementation of emit_ops method "emit_lt_goto". */
3002
3003 static void
3004 aarch64_emit_lt_goto (int *offset_p, int *size_p)
3005 {
3006 uint32_t buf[16];
3007 uint32_t *p = buf;
3008
3009 p += emit_pop (p, x1);
3010 p += emit_cmp (p, x1, register_operand (x0));
3011 /* Branch over the next instruction if x0 >= x1. */
3012 p += emit_bcond (p, GE, 8);
3013 /* The NOP instruction will be patched with an unconditional branch. */
3014 if (offset_p)
3015 *offset_p = (p - buf) * 4;
3016 if (size_p)
3017 *size_p = 4;
3018 p += emit_nop (p);
3019
3020 emit_ops_insns (buf, p - buf);
3021 }
3022
3023 /* Implementation of emit_ops method "emit_le_goto". */
3024
3025 static void
3026 aarch64_emit_le_goto (int *offset_p, int *size_p)
3027 {
3028 uint32_t buf[16];
3029 uint32_t *p = buf;
3030
3031 p += emit_pop (p, x1);
3032 p += emit_cmp (p, x1, register_operand (x0));
3033 /* Branch over the next instruction if x0 > x1. */
3034 p += emit_bcond (p, GT, 8);
3035 /* The NOP instruction will be patched with an unconditional branch. */
3036 if (offset_p)
3037 *offset_p = (p - buf) * 4;
3038 if (size_p)
3039 *size_p = 4;
3040 p += emit_nop (p);
3041
3042 emit_ops_insns (buf, p - buf);
3043 }
3044
3045 /* Implementation of emit_ops method "emit_gt_goto". */
3046
3047 static void
3048 aarch64_emit_gt_goto (int *offset_p, int *size_p)
3049 {
3050 uint32_t buf[16];
3051 uint32_t *p = buf;
3052
3053 p += emit_pop (p, x1);
3054 p += emit_cmp (p, x1, register_operand (x0));
3055 /* Branch over the next instruction if x0 <= x1. */
3056 p += emit_bcond (p, LE, 8);
3057 /* The NOP instruction will be patched with an unconditional branch. */
3058 if (offset_p)
3059 *offset_p = (p - buf) * 4;
3060 if (size_p)
3061 *size_p = 4;
3062 p += emit_nop (p);
3063
3064 emit_ops_insns (buf, p - buf);
3065 }
3066
3067 /* Implementation of emit_ops method "emit_ge_goto". */
3068
3069 static void
3070 aarch64_emit_ge_got (int *offset_p, int *size_p)
3071 {
3072 uint32_t buf[16];
3073 uint32_t *p = buf;
3074
3075 p += emit_pop (p, x1);
3076 p += emit_cmp (p, x1, register_operand (x0));
3077 /* Branch over the next instruction if x0 < x1. */
3078 p += emit_bcond (p, LT, 8);
3079 /* The NOP instruction will be patched with an unconditional branch. */
3080 if (offset_p)
3081 *offset_p = (p - buf) * 4;
3082 if (size_p)
3083 *size_p = 4;
3084 p += emit_nop (p);
3085
3086 emit_ops_insns (buf, p - buf);
3087 }
3088
3089 static struct emit_ops aarch64_emit_ops_impl =
3090 {
3091 aarch64_emit_prologue,
3092 aarch64_emit_epilogue,
3093 aarch64_emit_add,
3094 aarch64_emit_sub,
3095 aarch64_emit_mul,
3096 aarch64_emit_lsh,
3097 aarch64_emit_rsh_signed,
3098 aarch64_emit_rsh_unsigned,
3099 aarch64_emit_ext,
3100 aarch64_emit_log_not,
3101 aarch64_emit_bit_and,
3102 aarch64_emit_bit_or,
3103 aarch64_emit_bit_xor,
3104 aarch64_emit_bit_not,
3105 aarch64_emit_equal,
3106 aarch64_emit_less_signed,
3107 aarch64_emit_less_unsigned,
3108 aarch64_emit_ref,
3109 aarch64_emit_if_goto,
3110 aarch64_emit_goto,
3111 aarch64_write_goto_address,
3112 aarch64_emit_const,
3113 aarch64_emit_call,
3114 aarch64_emit_reg,
3115 aarch64_emit_pop,
3116 aarch64_emit_stack_flush,
3117 aarch64_emit_zero_ext,
3118 aarch64_emit_swap,
3119 aarch64_emit_stack_adjust,
3120 aarch64_emit_int_call_1,
3121 aarch64_emit_void_call_2,
3122 aarch64_emit_eq_goto,
3123 aarch64_emit_ne_goto,
3124 aarch64_emit_lt_goto,
3125 aarch64_emit_le_goto,
3126 aarch64_emit_gt_goto,
3127 aarch64_emit_ge_got,
3128 };
3129
3130 /* Implementation of target ops method "emit_ops". */
3131
3132 emit_ops *
3133 aarch64_target::emit_ops ()
3134 {
3135 return &aarch64_emit_ops_impl;
3136 }
3137
3138 /* Implementation of target ops method
3139 "get_min_fast_tracepoint_insn_len". */
3140
3141 int
3142 aarch64_target::get_min_fast_tracepoint_insn_len ()
3143 {
3144 return 4;
3145 }
3146
3147 /* Implementation of linux target ops method "low_supports_range_stepping". */
3148
3149 bool
3150 aarch64_target::low_supports_range_stepping ()
3151 {
3152 return true;
3153 }
3154
3155 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3156
3157 const gdb_byte *
3158 aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3159 {
3160 if (is_64bit_tdesc ())
3161 {
3162 *size = aarch64_breakpoint_len;
3163 return aarch64_breakpoint;
3164 }
3165 else
3166 return arm_sw_breakpoint_from_kind (kind, size);
3167 }
3168
3169 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3170
3171 int
3172 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3173 {
3174 if (is_64bit_tdesc ())
3175 return aarch64_breakpoint_len;
3176 else
3177 return arm_breakpoint_kind_from_pc (pcptr);
3178 }
3179
3180 /* Implementation of the target ops method
3181 "breakpoint_kind_from_current_state". */
3182
3183 int
3184 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3185 {
3186 if (is_64bit_tdesc ())
3187 return aarch64_breakpoint_len;
3188 else
3189 return arm_breakpoint_kind_from_current_state (pcptr);
3190 }
3191
3192 /* The linux target ops object. */
3193
3194 linux_process_target *the_linux_target = &the_aarch64_target;
3195
3196 void
3197 initialize_low_arch (void)
3198 {
3199 initialize_low_arch_aarch32 ();
3200
3201 initialize_regsets_info (&aarch64_regsets_info);
3202 initialize_regsets_info (&aarch64_sve_regsets_info);
3203 }