gdbserver/linux-low: turn 'emit_ops' into a method
File: gdbserver/linux-aarch64-low.cc (deliverable/binutils-gdb.git)
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 const regs_info *get_regs_info () override;
59
60 int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;
61
62 int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;
63
64 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
65
66 bool supports_z_point_type (char z_type) override;
67
68 bool supports_tracepoints () override;
69
70 bool supports_fast_tracepoints () override;
71
72 int install_fast_tracepoint_jump_pad
73 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
74 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
75 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
76 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
77 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
78 char *err) override;
79
80 int get_min_fast_tracepoint_insn_len () override;
81
82 struct emit_ops *emit_ops () override;
83
84 protected:
85
86 void low_arch_setup () override;
87
88 bool low_cannot_fetch_register (int regno) override;
89
90 bool low_cannot_store_register (int regno) override;
91
92 bool low_supports_breakpoints () override;
93
94 CORE_ADDR low_get_pc (regcache *regcache) override;
95
96 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
97
98 bool low_breakpoint_at (CORE_ADDR pc) override;
99
100 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
101 int size, raw_breakpoint *bp) override;
102
103 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
104 int size, raw_breakpoint *bp) override;
105
106 bool low_stopped_by_watchpoint () override;
107
108 CORE_ADDR low_stopped_data_address () override;
109
110 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
111 int direction) override;
112
113 arch_process_info *low_new_process () override;
114
115 void low_delete_process (arch_process_info *info) override;
116
117 void low_new_thread (lwp_info *) override;
118
119 void low_delete_thread (arch_lwp_info *) override;
120
121 void low_new_fork (process_info *parent, process_info *child) override;
122
123 void low_prepare_to_resume (lwp_info *lwp) override;
124
125 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
126 };
127
128 /* The singleton target ops object. */
129
130 static aarch64_target the_aarch64_target;
131
132 bool
133 aarch64_target::low_cannot_fetch_register (int regno)
134 {
135 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
136 "is not implemented by the target");
137 }
138
139 bool
140 aarch64_target::low_cannot_store_register (int regno)
141 {
142 gdb_assert_not_reached ("linux target op low_cannot_store_register "
143 "is not implemented by the target");
144 }
145
146 void
147 aarch64_target::low_prepare_to_resume (lwp_info *lwp)
148 {
149 aarch64_linux_prepare_to_resume (lwp);
150 }
151
152 /* Per-process arch-specific data we want to keep. */
153
154 struct arch_process_info
155 {
156 /* Hardware breakpoint/watchpoint data.
157 The reason for them to be per-process rather than per-thread is
158 due to the lack of information in the gdbserver environment;
159 gdbserver is not told whether a requested hardware
160 breakpoint/watchpoint is thread specific, so it has to set
161 each hw bp/wp for every thread in the current process. The
162 higher level bp/wp management in gdb will resume a thread if a hw
163 bp/wp trap is not expected for it. Since the hw bp/wp settings are
164 the same for each thread, it is reasonable for the data to live here.
165 */
166 struct aarch64_debug_reg_state debug_reg_state;
167 };
168
169 /* Return true if the size of register 0 is 8 bytes. */
170
171 static int
172 is_64bit_tdesc (void)
173 {
174 struct regcache *regcache = get_thread_regcache (current_thread, 0);
175
176 return register_size (regcache->tdesc, 0) == 8;
177 }
178
179 /* Return true if the regcache's target description contains SVE registers. */
180
181 static bool
182 is_sve_tdesc (void)
183 {
184 struct regcache *regcache = get_thread_regcache (current_thread, 0);
185
186 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
187 }
188
189 static void
190 aarch64_fill_gregset (struct regcache *regcache, void *buf)
191 {
192 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
193 int i;
194
195 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
196 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
197 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
198 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
199 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
200 }
201
202 static void
203 aarch64_store_gregset (struct regcache *regcache, const void *buf)
204 {
205 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
206 int i;
207
208 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
209 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
210 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
211 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
212 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
213 }
214
215 static void
216 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
217 {
218 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
219 int i;
220
221 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
222 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
223 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
224 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
225 }
226
227 static void
228 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
229 {
230 const struct user_fpsimd_state *regset
231 = (const struct user_fpsimd_state *) buf;
232 int i;
233
234 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
235 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
236 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
237 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
238 }
239
240 /* Store the pauth registers to regcache. */
241
242 static void
243 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
244 {
245 uint64_t *pauth_regset = (uint64_t *) buf;
246 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
247
248 if (pauth_base == 0)
249 return;
250
251 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
252 &pauth_regset[0]);
253 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
254 &pauth_regset[1]);
255 }
256
257 bool
258 aarch64_target::low_supports_breakpoints ()
259 {
260 return true;
261 }
262
263 /* Implementation of linux target ops method "low_get_pc". */
264
265 CORE_ADDR
266 aarch64_target::low_get_pc (regcache *regcache)
267 {
268 if (register_size (regcache->tdesc, 0) == 8)
269 return linux_get_pc_64bit (regcache);
270 else
271 return linux_get_pc_32bit (regcache);
272 }
273
274 /* Implementation of linux target ops method "low_set_pc". */
275
276 void
277 aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
278 {
279 if (register_size (regcache->tdesc, 0) == 8)
280 linux_set_pc_64bit (regcache, pc);
281 else
282 linux_set_pc_32bit (regcache, pc);
283 }
284
285 #define aarch64_breakpoint_len 4
286
287 /* AArch64 BRK software debug mode instruction.
288 This instruction needs to match gdb/aarch64-tdep.c
289 (aarch64_default_breakpoint). */
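/* For reference, these bytes are the little-endian encoding of BRK #0
   (0xd4200000). */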
290 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
291
292 /* Implementation of linux target ops method "low_breakpoint_at". */
293
294 bool
295 aarch64_target::low_breakpoint_at (CORE_ADDR where)
296 {
297 if (is_64bit_tdesc ())
298 {
299 gdb_byte insn[aarch64_breakpoint_len];
300
301 read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
302 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
303 return true;
304
305 return false;
306 }
307 else
308 return arm_breakpoint_at (where);
309 }
310
311 static void
312 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
313 {
314 int i;
315
316 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
317 {
318 state->dr_addr_bp[i] = 0;
319 state->dr_ctrl_bp[i] = 0;
320 state->dr_ref_count_bp[i] = 0;
321 }
322
323 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
324 {
325 state->dr_addr_wp[i] = 0;
326 state->dr_ctrl_wp[i] = 0;
327 state->dr_ref_count_wp[i] = 0;
328 }
329 }
330
331 /* Return the pointer to the debug register state structure in the
332 current process' arch-specific data area. */
333
334 struct aarch64_debug_reg_state *
335 aarch64_get_debug_reg_state (pid_t pid)
336 {
337 struct process_info *proc = find_process_pid (pid);
338
339 return &proc->priv->arch_private->debug_reg_state;
340 }
341
342 /* Implementation of target ops method "supports_z_point_type". */
343
344 bool
345 aarch64_target::supports_z_point_type (char z_type)
346 {
347 switch (z_type)
348 {
349 case Z_PACKET_SW_BP:
350 case Z_PACKET_HW_BP:
351 case Z_PACKET_WRITE_WP:
352 case Z_PACKET_READ_WP:
353 case Z_PACKET_ACCESS_WP:
354 return true;
355 default:
356 return false;
357 }
358 }
359
360 /* Implementation of linux target ops method "low_insert_point".
361
362 This only records the info of the to-be-inserted bp/wp;
363 the actual insertion will happen when threads are resumed.
364
365 int
366 aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
367 int len, raw_breakpoint *bp)
368 {
369 int ret;
370 enum target_hw_bp_type targ_type;
371 struct aarch64_debug_reg_state *state
372 = aarch64_get_debug_reg_state (pid_of (current_thread));
373
374 if (show_debug_regs)
375 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
376 (unsigned long) addr, len);
377
378 /* Determine the type from the raw breakpoint type. */
379 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
380
381 if (targ_type != hw_execute)
382 {
383 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
384 ret = aarch64_handle_watchpoint (targ_type, addr, len,
385 1 /* is_insert */, state);
386 else
387 ret = -1;
388 }
389 else
390 {
391 if (len == 3)
392 {
393 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
394 instruction. Set it to 2 to correctly encode the length bit
395 mask in the hardware breakpoint/watchpoint control register. */
396 len = 2;
397 }
398 ret = aarch64_handle_breakpoint (targ_type, addr, len,
399 1 /* is_insert */, state);
400 }
401
402 if (show_debug_regs)
403 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
404 targ_type);
405
406 return ret;
407 }
408
409 /* Implementation of linux target ops method "low_remove_point".
410
411 This only records the info of the to-be-removed bp/wp;
412 the actual removal will be done when threads are resumed.
413
414 int
415 aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
416 int len, raw_breakpoint *bp)
417 {
418 int ret;
419 enum target_hw_bp_type targ_type;
420 struct aarch64_debug_reg_state *state
421 = aarch64_get_debug_reg_state (pid_of (current_thread));
422
423 if (show_debug_regs)
424 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
425 (unsigned long) addr, len);
426
427 /* Determine the type from the raw breakpoint type. */
428 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
429
430 /* Set up state pointers. */
431 if (targ_type != hw_execute)
432 ret =
433 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
434 state);
435 else
436 {
437 if (len == 3)
438 {
439 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
440 instruction. Set it to 2 to correctly encode the length bit
441 mask in the hardware breakpoint/watchpoint control register. */
442 len = 2;
443 }
444 ret = aarch64_handle_breakpoint (targ_type, addr, len,
445 0 /* is_insert */, state);
446 }
447
448 if (show_debug_regs)
449 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
450 targ_type);
451
452 return ret;
453 }
454
455 /* Implementation of linux target ops method "low_stopped_data_address". */
456
457 CORE_ADDR
458 aarch64_target::low_stopped_data_address ()
459 {
460 siginfo_t siginfo;
461 int pid, i;
462 struct aarch64_debug_reg_state *state;
463
464 pid = lwpid_of (current_thread);
465
466 /* Get the siginfo. */
467 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
468 return (CORE_ADDR) 0;
469
470 /* Need to be a hardware breakpoint/watchpoint trap. */
471 if (siginfo.si_signo != SIGTRAP
472 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
473 return (CORE_ADDR) 0;
474
475 /* Check if the address matches any watched address. */
476 state = aarch64_get_debug_reg_state (pid_of (current_thread));
477 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
478 {
479 const unsigned int offset
480 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
481 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
482 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
483 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
484 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
485 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
486
487 if (state->dr_ref_count_wp[i]
488 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
489 && addr_trap >= addr_watch_aligned
490 && addr_trap < addr_watch + len)
491 {
492 /* ADDR_TRAP reports the first address of the memory range
493 accessed by the CPU, regardless of what was the memory
494 range watched. Thus, a large CPU access that straddles
495 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
496 ADDR_TRAP that is lower than the
497 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
498
499 addr: | 4 | 5 | 6 | 7 | 8 |
500 |---- range watched ----|
501 |----------- range accessed ------------|
502
503 In this case, ADDR_TRAP will be 4.
504
505 To match a watchpoint known to GDB core, we must never
506 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
507 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
508 positive on kernels older than 4.10. See PR
509 external/20207. */
510 return addr_orig;
511 }
512 }
513
514 return (CORE_ADDR) 0;
515 }
516
517 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
518
519 bool
520 aarch64_target::low_stopped_by_watchpoint ()
521 {
522 return (low_stopped_data_address () != 0);
523 }
524
525 /* Fetch the thread-local storage pointer for libthread_db. */
526
527 ps_err_e
528 ps_get_thread_area (struct ps_prochandle *ph,
529 lwpid_t lwpid, int idx, void **base)
530 {
531 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
532 is_64bit_tdesc ());
533 }
534
535 /* Implementation of linux target ops method "low_siginfo_fixup". */
536
537 bool
538 aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
539 int direction)
540 {
541 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
542 if (!is_64bit_tdesc ())
543 {
544 if (direction == 0)
545 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
546 native);
547 else
548 aarch64_siginfo_from_compat_siginfo (native,
549 (struct compat_siginfo *) inf);
550
551 return true;
552 }
553
554 return false;
555 }
556
557 /* Implementation of linux target ops method "low_new_process". */
558
559 arch_process_info *
560 aarch64_target::low_new_process ()
561 {
562 struct arch_process_info *info = XCNEW (struct arch_process_info);
563
564 aarch64_init_debug_reg_state (&info->debug_reg_state);
565
566 return info;
567 }
568
569 /* Implementation of linux target ops method "low_delete_process". */
570
571 void
572 aarch64_target::low_delete_process (arch_process_info *info)
573 {
574 xfree (info);
575 }
576
577 void
578 aarch64_target::low_new_thread (lwp_info *lwp)
579 {
580 aarch64_linux_new_thread (lwp);
581 }
582
583 void
584 aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
585 {
586 aarch64_linux_delete_thread (arch_lwp);
587 }
588
589 /* Implementation of linux target ops method "low_new_fork". */
590
591 void
592 aarch64_target::low_new_fork (process_info *parent,
593 process_info *child)
594 {
595 /* These are allocated by linux_add_process. */
596 gdb_assert (parent->priv != NULL
597 && parent->priv->arch_private != NULL);
598 gdb_assert (child->priv != NULL
599 && child->priv->arch_private != NULL);
600
601 /* Linux kernels before 2.6.33 commit
602 72f674d203cd230426437cdcf7dd6f681dad8b0d
603 inherit the hardware debug registers from the parent
604 on fork/vfork/clone. Newer Linux kernels create such tasks with
605 zeroed debug registers.
606
607 GDB core assumes the child inherits the watchpoints/hw
608 breakpoints of the parent, and will remove them all from the
609 forked off process. Copy the debug register mirrors into the
610 new process so that all breakpoints and watchpoints can be
611 removed together. The debug register mirrors will be zeroed
612 in the end before detaching the forked off process, thus making
613 this compatible with older Linux kernels too. */
614
615 *child->priv->arch_private = *parent->priv->arch_private;
616 }
617
618 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
619 #define AARCH64_HWCAP_PACA (1 << 30)
620
621 /* Implementation of linux target ops method "low_arch_setup". */
622
623 void
624 aarch64_target::low_arch_setup ()
625 {
626 unsigned int machine;
627 int is_elf64;
628 int tid;
629
630 tid = lwpid_of (current_thread);
631
632 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
633
634 if (is_elf64)
635 {
636 uint64_t vq = aarch64_sve_get_vq (tid);
637 unsigned long hwcap = linux_get_hwcap (8);
638 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
639
640 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
641 }
642 else
643 current_process ()->tdesc = aarch32_linux_read_description ();
644
645 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
646 }
647
648 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
649
650 static void
651 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
652 {
653 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
654 }
655
656 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
657
658 static void
659 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
660 {
661 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
662 }
663
664 static struct regset_info aarch64_regsets[] =
665 {
666 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
667 sizeof (struct user_pt_regs), GENERAL_REGS,
668 aarch64_fill_gregset, aarch64_store_gregset },
669 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
670 sizeof (struct user_fpsimd_state), FP_REGS,
671 aarch64_fill_fpregset, aarch64_store_fpregset
672 },
673 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
674 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
675 NULL, aarch64_store_pauthregset },
676 NULL_REGSET
677 };
678
679 static struct regsets_info aarch64_regsets_info =
680 {
681 aarch64_regsets, /* regsets */
682 0, /* num_regsets */
683 NULL, /* disabled_regsets */
684 };
685
686 static struct regs_info regs_info_aarch64 =
687 {
688 NULL, /* regset_bitmap */
689 NULL, /* usrregs */
690 &aarch64_regsets_info,
691 };
692
693 static struct regset_info aarch64_sve_regsets[] =
694 {
695 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
696 sizeof (struct user_pt_regs), GENERAL_REGS,
697 aarch64_fill_gregset, aarch64_store_gregset },
698 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
699 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
700 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
701 },
702 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
703 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
704 NULL, aarch64_store_pauthregset },
705 NULL_REGSET
706 };
707
708 static struct regsets_info aarch64_sve_regsets_info =
709 {
710 aarch64_sve_regsets, /* regsets. */
711 0, /* num_regsets. */
712 NULL, /* disabled_regsets. */
713 };
714
715 static struct regs_info regs_info_aarch64_sve =
716 {
717 NULL, /* regset_bitmap. */
718 NULL, /* usrregs. */
719 &aarch64_sve_regsets_info,
720 };
721
722 /* Implementation of linux target ops method "get_regs_info". */
723
724 const regs_info *
725 aarch64_target::get_regs_info ()
726 {
727 if (!is_64bit_tdesc ())
728 return &regs_info_aarch32;
729
730 if (is_sve_tdesc ())
731 return &regs_info_aarch64_sve;
732
733 return &regs_info_aarch64;
734 }
735
736 /* Implementation of target ops method "supports_tracepoints". */
737
738 bool
739 aarch64_target::supports_tracepoints ()
740 {
741 if (current_thread == NULL)
742 return true;
743 else
744 {
745 /* We don't support tracepoints on aarch32 now. */
746 return is_64bit_tdesc ();
747 }
748 }
749
750 /* Implementation of linux target ops method "low_get_thread_area". */
751
752 int
753 aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
754 {
755 struct iovec iovec;
756 uint64_t reg;
757
758 iovec.iov_base = &reg;
759 iovec.iov_len = sizeof (reg);
760
761 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
762 return -1;
763
764 *addrp = reg;
765
766 return 0;
767 }
768
769 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
770
771 static void
772 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
773 {
774 int use_64bit = register_size (regcache->tdesc, 0) == 8;
775
776 if (use_64bit)
777 {
778 long l_sysno;
779
780 collect_register_by_name (regcache, "x8", &l_sysno);
781 *sysno = (int) l_sysno;
782 }
783 else
784 collect_register_by_name (regcache, "r7", sysno);
785 }
786
787 /* List of condition codes that we need. */
788
789 enum aarch64_condition_codes
790 {
791 EQ = 0x0,
792 NE = 0x1,
793 LO = 0x3,
794 GE = 0xa,
795 LT = 0xb,
796 GT = 0xc,
797 LE = 0xd,
798 };
799
800 enum aarch64_operand_type
801 {
802 OPERAND_IMMEDIATE,
803 OPERAND_REGISTER,
804 };
805
806 /* Representation of an operand. At this time, it only supports register
807 and immediate types. */
808
809 struct aarch64_operand
810 {
811 /* Type of the operand. */
812 enum aarch64_operand_type type;
813
814 /* Value of the operand according to the type. */
815 union
816 {
817 uint32_t imm;
818 struct aarch64_register reg;
819 };
820 };
821
822 /* List of registers that we are currently using; we can add more here as
823 we need to use them. */
824
825 /* General purpose scratch registers (64 bit). */
826 static const struct aarch64_register x0 = { 0, 1 };
827 static const struct aarch64_register x1 = { 1, 1 };
828 static const struct aarch64_register x2 = { 2, 1 };
829 static const struct aarch64_register x3 = { 3, 1 };
830 static const struct aarch64_register x4 = { 4, 1 };
831
832 /* General purpose scratch registers (32 bit). */
833 static const struct aarch64_register w0 = { 0, 0 };
834 static const struct aarch64_register w2 = { 2, 0 };
835
836 /* Intra-procedure scratch registers. */
837 static const struct aarch64_register ip0 = { 16, 1 };
838
839 /* Special purpose registers. */
840 static const struct aarch64_register fp = { 29, 1 };
841 static const struct aarch64_register lr = { 30, 1 };
842 static const struct aarch64_register sp = { 31, 1 };
843 static const struct aarch64_register xzr = { 31, 1 };
844
845 /* Construct an aarch64_register value at run time. If we know the register
846 statically, we should make it a global as above instead of using this
847 helper function. */
848
849 static struct aarch64_register
850 aarch64_register (unsigned num, int is64)
851 {
852 return (struct aarch64_register) { num, is64 };
853 }
854
855 /* Helper function to create a register operand, for instructions with
856 different types of operands.
857
858 For example:
859 p += emit_mov (p, x0, register_operand (x1)); */
860
861 static struct aarch64_operand
862 register_operand (struct aarch64_register reg)
863 {
864 struct aarch64_operand operand;
865
866 operand.type = OPERAND_REGISTER;
867 operand.reg = reg;
868
869 return operand;
870 }
871
872 /* Helper function to create an immediate operand, for instructions with
873 different types of operands.
874
875 For example:
876 p += emit_mov (p, x0, immediate_operand (12)); */
877
878 static struct aarch64_operand
879 immediate_operand (uint32_t imm)
880 {
881 struct aarch64_operand operand;
882
883 operand.type = OPERAND_IMMEDIATE;
884 operand.imm = imm;
885
886 return operand;
887 }
888
889 /* Helper function to create an offset memory operand.
890
891 For example:
892 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
893
894 static struct aarch64_memory_operand
895 offset_memory_operand (int32_t offset)
896 {
897 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
898 }
899
900 /* Helper function to create a pre-index memory operand.
901
902 For example:
903 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
904
905 static struct aarch64_memory_operand
906 preindex_memory_operand (int32_t index)
907 {
908 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
909 }
910
911 /* Helper function to create a post-index memory operand.
912
913 For example:
914 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
915
916 static struct aarch64_memory_operand
917 postindex_memory_operand (int32_t index)
918 {
919 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
920 }
921
922 /* System control registers. These special registers can be read with
923 the MRS instruction and written with the MSR instruction.
924
925 - NZCV: Condition flags. GDB refers to this register under the CPSR
926 name.
927 - FPSR: Floating-point status register.
928 - FPCR: Floating-point control register.
929 - TPIDR_EL0: Software thread ID register. */
930
931 enum aarch64_system_control_registers
932 {
933 /* op0 op1 crn crm op2 */
934 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
935 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
936 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
937 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
938 };
939
940 /* Write a BLR instruction into *BUF.
941
942 BLR rn
943
944 RN is the register to branch to. */
945
946 static int
947 emit_blr (uint32_t *buf, struct aarch64_register rn)
948 {
949 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
950 }
951
952 /* Write a RET instruction into *BUF.
953
954 RET xn
955
956 RN is the register to branch to. */
957
958 static int
959 emit_ret (uint32_t *buf, struct aarch64_register rn)
960 {
961 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
962 }
963
964 static int
965 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
966 struct aarch64_register rt,
967 struct aarch64_register rt2,
968 struct aarch64_register rn,
969 struct aarch64_memory_operand operand)
970 {
971 uint32_t opc;
972 uint32_t pre_index;
973 uint32_t write_back;
974
975 if (rt.is64)
976 opc = ENCODE (2, 2, 30);
977 else
978 opc = ENCODE (0, 2, 30);
979
980 switch (operand.type)
981 {
982 case MEMORY_OPERAND_OFFSET:
983 {
984 pre_index = ENCODE (1, 1, 24);
985 write_back = ENCODE (0, 1, 23);
986 break;
987 }
988 case MEMORY_OPERAND_POSTINDEX:
989 {
990 pre_index = ENCODE (0, 1, 24);
991 write_back = ENCODE (1, 1, 23);
992 break;
993 }
994 case MEMORY_OPERAND_PREINDEX:
995 {
996 pre_index = ENCODE (1, 1, 24);
997 write_back = ENCODE (1, 1, 23);
998 break;
999 }
1000 default:
1001 return 0;
1002 }
1003
1004 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
1005 | ENCODE (operand.index >> 3, 7, 15)
1006 | ENCODE (rt2.num, 5, 10)
1007 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1008 }
1009
1010 /* Write a STP instruction into *BUF.
1011
1012 STP rt, rt2, [rn, #offset]
1013 STP rt, rt2, [rn, #index]!
1014 STP rt, rt2, [rn], #index
1015
1016 RT and RT2 are the registers to store.
1017 RN is the base address register.
1018 OFFSET is the immediate to add to the base address. It is limited to a
1019 -512 .. 504 range (7 bits << 3). */
1020
1021 static int
1022 emit_stp (uint32_t *buf, struct aarch64_register rt,
1023 struct aarch64_register rt2, struct aarch64_register rn,
1024 struct aarch64_memory_operand operand)
1025 {
1026 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
1027 }
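
/* As an illustrative sketch (not taken verbatim from this file), pushing a
   pair of registers onto the stack could be emitted as:

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 8));

   which corresponds to STP x0, x1, [sp, #-16]!. */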
1028
1029 /* Write a LDP instruction into *BUF.
1030
1031 LDP rt, rt2, [rn, #offset]
1032 LDP rt, rt2, [rn, #index]!
1033 LDP rt, rt2, [rn], #index
1034
1035 RT and RT2 are the registers to load.
1036 RN is the base address register.
1037 OFFSET is the immediate to add to the base address. It is limited to a
1038 -512 .. 504 range (7 bits << 3). */
1039
1040 static int
1041 emit_ldp (uint32_t *buf, struct aarch64_register rt,
1042 struct aarch64_register rt2, struct aarch64_register rn,
1043 struct aarch64_memory_operand operand)
1044 {
1045 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
1046 }
1047
1048 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1049
1050 LDP qt, qt2, [rn, #offset]
1051
1052 RT and RT2 are the Q registers to load.
1053 RN is the base address register.
1054 OFFSET is the immediate to add to the base address. It is limited to
1055 -1024 .. 1008 range (7 bits << 4). */
1056
1057 static int
1058 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1059 struct aarch64_register rn, int32_t offset)
1060 {
1061 uint32_t opc = ENCODE (2, 2, 30);
1062 uint32_t pre_index = ENCODE (1, 1, 24);
1063
1064 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
1065 | ENCODE (offset >> 4, 7, 15)
1066 | ENCODE (rt2, 5, 10)
1067 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1068 }
1069
1070 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1071
1072 STP qt, qt2, [rn, #offset]
1073
1074 RT and RT2 are the Q registers to store.
1075 RN is the base address register.
1076 OFFSET is the immediate to add to the base address. It is limited to
1077 -1024 .. 1008 range (7 bits << 4). */
1078
1079 static int
1080 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1081 struct aarch64_register rn, int32_t offset)
1082 {
1083 uint32_t opc = ENCODE (2, 2, 30);
1084 uint32_t pre_index = ENCODE (1, 1, 24);
1085
1086 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
1087 | ENCODE (offset >> 4, 7, 15)
1088 | ENCODE (rt2, 5, 10)
1089 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1090 }
1091
1092 /* Write a LDRH instruction into *BUF.
1093
1094 LDRH wt, [xn, #offset]
1095 LDRH wt, [xn, #index]!
1096 LDRH wt, [xn], #index
1097
1098 RT is the register to load.
1099 RN is the base address register.
1100 OFFSET is the immediate to add to the base address. It is limited to
1101 a 0 .. 8190 range (12 bits << 1). */
1102
1103 static int
1104 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1105 struct aarch64_register rn,
1106 struct aarch64_memory_operand operand)
1107 {
1108 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1109 }
1110
1111 /* Write a LDRB instruction into *BUF.
1112
1113 LDRB wt, [xn, #offset]
1114 LDRB wt, [xn, #index]!
1115 LDRB wt, [xn], #index
1116
1117 RT is the register to load.
1118 RN is the base address register.
1119 OFFSET is the immediate to add to the base address. It is limited to
1120 a 0 .. 4095 range (12 bits). */
1121
1122 static int
1123 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1124 struct aarch64_register rn,
1125 struct aarch64_memory_operand operand)
1126 {
1127 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1128 }
1129
1130
1131
1132 /* Write a STR instruction into *BUF.
1133
1134 STR rt, [rn, #offset]
1135 STR rt, [rn, #index]!
1136 STR rt, [rn], #index
1137
1138 RT is the register to store.
1139 RN is the base address register.
1140 OFFSET is the immediate to add to the base address. It is limited to
1141 0 .. 32760 range (12 bits << 3). */
1142
1143 static int
1144 emit_str (uint32_t *buf, struct aarch64_register rt,
1145 struct aarch64_register rn,
1146 struct aarch64_memory_operand operand)
1147 {
1148 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1149 }
1150
1151 /* Helper function emitting an exclusive load or store instruction. */
1152
1153 static int
1154 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1155 enum aarch64_opcodes opcode,
1156 struct aarch64_register rs,
1157 struct aarch64_register rt,
1158 struct aarch64_register rt2,
1159 struct aarch64_register rn)
1160 {
1161 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1162 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1163 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1164 }
1165
1166 /* Write a LDAXR instruction into *BUF.
1167
1168 LDAXR rt, [xn]
1169
1170 RT is the destination register.
1171 RN is the base address register. */
1172
1173 static int
1174 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1175 struct aarch64_register rn)
1176 {
1177 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1178 xzr, rn);
1179 }
1180
1181 /* Write a STXR instruction into *BUF.
1182
1183 STXR ws, rt, [xn]
1184
1185 RS is the status register; it indicates whether the store succeeded.
1186 RT is the register to store.
1187 RN is the base address register. */
1188
1189 static int
1190 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1191 struct aarch64_register rt, struct aarch64_register rn)
1192 {
1193 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1194 xzr, rn);
1195 }
1196
1197 /* Write a STLR instruction into *BUF.
1198
1199 STLR rt, [xn]
1200
1201 RT is the register to store.
1202 RN is the base address register. */
1203
1204 static int
1205 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1206 struct aarch64_register rn)
1207 {
1208 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1209 xzr, rn);
1210 }
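
/* An illustrative (not verbatim) pairing of the exclusive operations above:
   load exclusive into x1, then attempt to store x3 back, leaving the
   success/failure status in w2 for the caller to test:

     p += emit_ldaxr (p, x1, x0);
     p += emit_stxr (p, w2, x3, x0);  */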
1211
1212 /* Helper function for data processing instructions with register sources. */
1213
1214 static int
1215 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1216 struct aarch64_register rd,
1217 struct aarch64_register rn,
1218 struct aarch64_register rm)
1219 {
1220 uint32_t size = ENCODE (rd.is64, 1, 31);
1221
1222 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1223 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1224 }
1225
1226 /* Helper function for data processing instructions taking either a register
1227 or an immediate. */
1228
1229 static int
1230 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1231 struct aarch64_register rd,
1232 struct aarch64_register rn,
1233 struct aarch64_operand operand)
1234 {
1235 uint32_t size = ENCODE (rd.is64, 1, 31);
1236 /* The opcode is different for register and immediate source operands. */
1237 uint32_t operand_opcode;
1238
1239 if (operand.type == OPERAND_IMMEDIATE)
1240 {
1241 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1242 operand_opcode = ENCODE (8, 4, 25);
1243
1244 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1245 | ENCODE (operand.imm, 12, 10)
1246 | ENCODE (rn.num, 5, 5)
1247 | ENCODE (rd.num, 5, 0));
1248 }
1249 else
1250 {
1251 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1252 operand_opcode = ENCODE (5, 4, 25);
1253
1254 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1255 rn, operand.reg);
1256 }
1257 }
1258
1259 /* Write an ADD instruction into *BUF.
1260
1261 ADD rd, rn, #imm
1262 ADD rd, rn, rm
1263
1264 This function handles both an immediate and register add.
1265
1266 RD is the destination register.
1267 RN is the input register.
1268 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1269 OPERAND_REGISTER. */
1270
1271 static int
1272 emit_add (uint32_t *buf, struct aarch64_register rd,
1273 struct aarch64_register rn, struct aarch64_operand operand)
1274 {
1275 return emit_data_processing (buf, ADD, rd, rn, operand);
1276 }
1277
1278 /* Write a SUB instruction into *BUF.
1279
1280 SUB rd, rn, #imm
1281 SUB rd, rn, rm
1282
1283 This function handles both an immediate and register sub.
1284
1285 RD is the destination register.
1286 RN is the input register.
1287 OPERAND is the immediate or register to subtract from RN. */
1288
1289 static int
1290 emit_sub (uint32_t *buf, struct aarch64_register rd,
1291 struct aarch64_register rn, struct aarch64_operand operand)
1292 {
1293 return emit_data_processing (buf, SUB, rd, rn, operand);
1294 }
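
/* For example (an illustrative sketch), reserving 32 bytes of stack space
   could be emitted as:

     p += emit_sub (p, sp, sp, immediate_operand (32));  */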
1295
1296 /* Write a MOV instruction into *BUF.
1297
1298 MOV rd, #imm
1299 MOV rd, rm
1300
1301 This function handles both a wide immediate move and a register move,
1302 with the condition that the source register is not xzr. xzr and the
1303 stack pointer share the same encoding and this function only supports
1304 the stack pointer.
1305
1306 RD is the destination register.
1307 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1308 OPERAND_REGISTER. */
1309
1310 static int
1311 emit_mov (uint32_t *buf, struct aarch64_register rd,
1312 struct aarch64_operand operand)
1313 {
1314 if (operand.type == OPERAND_IMMEDIATE)
1315 {
1316 uint32_t size = ENCODE (rd.is64, 1, 31);
1317 /* Do not shift the immediate. */
1318 uint32_t shift = ENCODE (0, 2, 21);
1319
1320 return aarch64_emit_insn (buf, MOV | size | shift
1321 | ENCODE (operand.imm, 16, 5)
1322 | ENCODE (rd.num, 5, 0));
1323 }
1324 else
1325 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1326 }
1327
1328 /* Write a MOVK instruction into *BUF.
1329
1330 MOVK rd, #imm, lsl #shift
1331
1332 RD is the destination register.
1333 IMM is the immediate.
1334 SHIFT is the logical shift left to apply to IMM. */
1335
1336 static int
1337 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1338 unsigned shift)
1339 {
1340 uint32_t size = ENCODE (rd.is64, 1, 31);
1341
1342 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1343 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1344 }
1345
1346 /* Write instructions into *BUF in order to move ADDR into a register.
1347 ADDR can be a 64-bit value.
1348
1349 This function will emit a series of MOV and MOVK instructions, such as:
1350
1351 MOV xd, #(addr)
1352 MOVK xd, #(addr >> 16), lsl #16
1353 MOVK xd, #(addr >> 32), lsl #32
1354 MOVK xd, #(addr >> 48), lsl #48 */
1355
1356 static int
1357 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1358 {
1359 uint32_t *p = buf;
1360
1361 /* The MOV (wide immediate) instruction clears the top bits of the
1362 register. */
1363 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1364
1365 if ((addr >> 16) != 0)
1366 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1367 else
1368 return p - buf;
1369
1370 if ((addr >> 32) != 0)
1371 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1372 else
1373 return p - buf;
1374
1375 if ((addr >> 48) != 0)
1376 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1377
1378 return p - buf;
1379 }
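
/* For example (an illustrative sketch, using a made-up address), loading a
   64-bit address into x0 and calling through it could be emitted as:

     p += emit_mov_addr (p, x0, (CORE_ADDR) 0x0000123456789abcULL);
     p += emit_blr (p, x0);  */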
1380
1381 /* Write a SUBS instruction into *BUF.
1382
1383 SUBS rd, rn, rm
1384
1385 This instruction updates the condition flags.
1386
1387 RD is the destination register.
1388 RN and RM are the source registers. */
1389
1390 static int
1391 emit_subs (uint32_t *buf, struct aarch64_register rd,
1392 struct aarch64_register rn, struct aarch64_operand operand)
1393 {
1394 return emit_data_processing (buf, SUBS, rd, rn, operand);
1395 }
1396
1397 /* Write a CMP instruction into *BUF.
1398
1399 CMP rn, rm
1400
1401 This instruction is an alias of SUBS xzr, rn, rm.
1402
1403 RN and RM are the registers to compare. */
1404
1405 static int
1406 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1407 struct aarch64_operand operand)
1408 {
1409 return emit_subs (buf, xzr, rn, operand);
1410 }
1411
1412 /* Write an AND instruction into *BUF.
1413
1414 AND rd, rn, rm
1415
1416 RD is the destination register.
1417 RN and RM are the source registers. */
1418
1419 static int
1420 emit_and (uint32_t *buf, struct aarch64_register rd,
1421 struct aarch64_register rn, struct aarch64_register rm)
1422 {
1423 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1424 }
1425
1426 /* Write an ORR instruction into *BUF.
1427
1428 ORR rd, rn, rm
1429
1430 RD is the destination register.
1431 RN and RM are the source registers. */
1432
1433 static int
1434 emit_orr (uint32_t *buf, struct aarch64_register rd,
1435 struct aarch64_register rn, struct aarch64_register rm)
1436 {
1437 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1438 }
1439
1440 /* Write an ORN instruction into *BUF.
1441
1442 ORN rd, rn, rm
1443
1444 RD is the destination register.
1445 RN and RM are the source registers. */
1446
1447 static int
1448 emit_orn (uint32_t *buf, struct aarch64_register rd,
1449 struct aarch64_register rn, struct aarch64_register rm)
1450 {
1451 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1452 }
1453
1454 /* Write an EOR instruction into *BUF.
1455
1456 EOR rd, rn, rm
1457
1458 RD is the destination register.
1459 RN and RM are the source registers. */
1460
1461 static int
1462 emit_eor (uint32_t *buf, struct aarch64_register rd,
1463 struct aarch64_register rn, struct aarch64_register rm)
1464 {
1465 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1466 }
1467
1468 /* Write a MVN instruction into *BUF.
1469
1470 MVN rd, rm
1471
1472 This is an alias for ORN rd, xzr, rm.
1473
1474 RD is the destination register.
1475 RM is the source register. */
1476
1477 static int
1478 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1479 struct aarch64_register rm)
1480 {
1481 return emit_orn (buf, rd, xzr, rm);
1482 }
1483
1484 /* Write a LSLV instruction into *BUF.
1485
1486 LSLV rd, rn, rm
1487
1488 RD is the destination register.
1489 RN and RM are the source registers. */
1490
1491 static int
1492 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1493 struct aarch64_register rn, struct aarch64_register rm)
1494 {
1495 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1496 }
1497
1498 /* Write a LSRV instruction into *BUF.
1499
1500 LSRV rd, rn, rm
1501
1502 RD is the destination register.
1503 RN and RM are the source registers. */
1504
1505 static int
1506 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1507 struct aarch64_register rn, struct aarch64_register rm)
1508 {
1509 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1510 }
1511
1512 /* Write an ASRV instruction into *BUF.
1513
1514 ASRV rd, rn, rm
1515
1516 RD is the destination register.
1517 RN and RM are the source registers. */
1518
1519 static int
1520 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1521 struct aarch64_register rn, struct aarch64_register rm)
1522 {
1523 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1524 }
1525
1526 /* Write a MUL instruction into *BUF.
1527
1528 MUL rd, rn, rm
1529
1530 RD is the destination register.
1531 RN and RM are the source registers. */
1532
1533 static int
1534 emit_mul (uint32_t *buf, struct aarch64_register rd,
1535 struct aarch64_register rn, struct aarch64_register rm)
1536 {
1537 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1538 }
1539
1540 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1541
1542 MRS xt, system_reg
1543
1544 RT is the destination register.
1545 SYSTEM_REG is the special purpose register to read. */
1546
1547 static int
1548 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1549 enum aarch64_system_control_registers system_reg)
1550 {
1551 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1552 | ENCODE (rt.num, 5, 0));
1553 }
1554
1555 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1556
1557 MSR system_reg, xt
1558
1559 SYSTEM_REG is the special purpose register to write.
1560 RT is the input register. */
1561
1562 static int
1563 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1564 struct aarch64_register rt)
1565 {
1566 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1567 | ENCODE (rt.num, 5, 0));
1568 }
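
/* For example (an illustrative sketch), saving and restoring the condition
   flags around emitted code that clobbers them could look like:

     p += emit_mrs (p, x2, NZCV);
     ... emit code that changes the flags ...
     p += emit_msr (p, NZCV, x2);  */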
1569
1570 /* Write a SEVL instruction into *BUF.
1571
1572 This is a hint instruction telling the hardware to trigger an event. */
1573
1574 static int
1575 emit_sevl (uint32_t *buf)
1576 {
1577 return aarch64_emit_insn (buf, SEVL);
1578 }
1579
1580 /* Write a WFE instruction into *BUF.
1581
1582 This is a hint instruction telling the hardware to wait for an event. */
1583
1584 static int
1585 emit_wfe (uint32_t *buf)
1586 {
1587 return aarch64_emit_insn (buf, WFE);
1588 }
1589
1590 /* Write a SBFM instruction into *BUF.
1591
1592 SBFM rd, rn, #immr, #imms
1593
1594 This instruction moves the bits from #immr to #imms into the
1595 destination, sign extending the result.
1596
1597 RD is the destination register.
1598 RN is the source register.
1599 IMMR is the bit number to start at (least significant bit).
1600 IMMS is the bit number to stop at (most significant bit). */
1601
1602 static int
1603 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1604 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1605 {
1606 uint32_t size = ENCODE (rd.is64, 1, 31);
1607 uint32_t n = ENCODE (rd.is64, 1, 22);
1608
1609 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1610 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1611 | ENCODE (rd.num, 5, 0));
1612 }
1613
1614 /* Write a SBFX instruction into *BUF.
1615
1616 SBFX rd, rn, #lsb, #width
1617
1618 This instruction moves #width bits from #lsb into the destination, sign
1619 extending the result. This is an alias for:
1620
1621 SBFM rd, rn, #lsb, #(lsb + width - 1)
1622
1623 RD is the destination register.
1624 RN is the source register.
1625 LSB is the bit number to start at (least significant bit).
1626 WIDTH is the number of bits to move. */
1627
1628 static int
1629 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1630 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1631 {
1632 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1633 }
1634
1635 /* Write a UBFM instruction into *BUF.
1636
1637 UBFM rd, rn, #immr, #imms
1638
1639 This instruction moves the bits from #immr to #imms into the
1640 destination, extending the result with zeros.
1641
1642 RD is the destination register.
1643 RN is the source register.
1644 IMMR is the bit number to start at (least significant bit).
1645 IMMS is the bit number to stop at (most significant bit). */
1646
1647 static int
1648 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1649 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1650 {
1651 uint32_t size = ENCODE (rd.is64, 1, 31);
1652 uint32_t n = ENCODE (rd.is64, 1, 22);
1653
1654 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1655 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1656 | ENCODE (rd.num, 5, 0));
1657 }
1658
1659 /* Write a UBFX instruction into *BUF.
1660
1661 UBFX rd, rn, #lsb, #width
1662
1663 This instruction moves #width bits from #lsb into the destination,
1664 extending the result with zeros. This is an alias for:
1665
1666 UBFM rd, rn, #lsb, #(lsb + width - 1)
1667
1668 RD is the destination register.
1669 RN is the source register.
1670 LSB is the bit number to start at (least significant bit).
1671 WIDTH is the number of bits to move. */
1672
1673 static int
1674 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1675 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1676 {
1677 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1678 }
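
/* As a worked example, extracting bits [23:8] of x1 into x0 is
   UBFX x0, x1, #8, #16, which expands to UBFM x0, x1, #8, #23
   (lsb + width - 1 = 23).  A sketch:

     p += emit_ubfx (p, x0, x1, 8, 16);  */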
1679
1680 /* Write a CSINC instruction into *BUF.
1681
1682 CSINC rd, rn, rm, cond
1683
1684 This instruction places rn in rd if the condition is true, and rm plus
1685 one otherwise.
1686
1687 RD is the destination register.
1688 RN and RM are the source registers.
1689 COND is the encoded condition. */
1690
1691 static int
1692 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1693 struct aarch64_register rn, struct aarch64_register rm,
1694 unsigned cond)
1695 {
1696 uint32_t size = ENCODE (rd.is64, 1, 31);
1697
1698 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1699 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1700 | ENCODE (rd.num, 5, 0));
1701 }
1702
1703 /* Write a CSET instruction into *BUF.
1704
1705 CSET rd, cond
1706
1707 This instruction conditionally writes 1 or 0 to the destination register.
1708 1 is written if the condition is true. This is an alias for:
1709
1710 CSINC rd, xzr, xzr, !cond
1711
1712 Note that the condition needs to be inverted.
1713
1714 RD is the destination register.
1716 COND is the encoded condition. */
1717
1718 static int
1719 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1720 {
1721 /* The least significant bit of the condition needs toggling in order to
1722 invert it. */
1723 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1724 }
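
/* For example (an illustrative sketch), materializing the result of a
   signed "x0 < x1" comparison as 0 or 1 in x0 could be emitted as:

     p += emit_cmp (p, x0, register_operand (x1));
     p += emit_cset (p, x0, LT);  */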
1725
1726 /* Write LEN instructions from BUF into the inferior memory at *TO.
1727
1728 Note instructions are always little endian on AArch64, unlike data. */
1729
1730 static void
1731 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1732 {
1733 size_t byte_len = len * sizeof (uint32_t);
1734 #if (__BYTE_ORDER == __BIG_ENDIAN)
1735 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1736 size_t i;
1737
1738 for (i = 0; i < len; i++)
1739 le_buf[i] = htole32 (buf[i]);
1740
1741 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1742
1743 xfree (le_buf);
1744 #else
1745 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1746 #endif
1747
1748 *to += byte_len;
1749 }
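
/* A typical caller (sketched here for illustration; WHERE is a hypothetical
   scratch address) builds instructions in a local buffer and copies them out
   in one call:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&where, p - buf, buf);  */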
1750
1751 /* Sub-class of struct aarch64_insn_data that stores the information
1752 needed to relocate an instruction for a fast tracepoint. The visitor
1753 relocates an instruction from BASE.INSN_ADDR to NEW_ADDR and saves
1754 the relocated instructions in the buffer pointed to by INSN_PTR. */
1755
1756 struct aarch64_insn_relocation_data
1757 {
1758 struct aarch64_insn_data base;
1759
1760 /* The new address the instruction is relocated to. */
1761 CORE_ADDR new_addr;
1762 /* Pointer to the buffer of relocated instruction(s). */
1763 uint32_t *insn_ptr;
1764 };
1765
1766 /* Implementation of aarch64_insn_visitor method "b". */
1767
1768 static void
1769 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1770 struct aarch64_insn_data *data)
1771 {
1772 struct aarch64_insn_relocation_data *insn_reloc
1773 = (struct aarch64_insn_relocation_data *) data;
1774 int64_t new_offset
1775 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1776
1777 if (can_encode_int32 (new_offset, 28))
1778 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1779 }
1780
1781 /* Implementation of aarch64_insn_visitor method "b_cond". */
1782
1783 static void
1784 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1785 struct aarch64_insn_data *data)
1786 {
1787 struct aarch64_insn_relocation_data *insn_reloc
1788 = (struct aarch64_insn_relocation_data *) data;
1789 int64_t new_offset
1790 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1791
1792 if (can_encode_int32 (new_offset, 21))
1793 {
1794 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1795 new_offset);
1796 }
1797 else if (can_encode_int32 (new_offset, 28))
1798 {
1799 /* The offset is out of range for a conditional branch
1800 instruction but not for an unconditional branch. We can use
1801 the following instructions instead:
1802
1803 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1804 B NOT_TAKEN ; Else jump over TAKEN and continue.
1805 TAKEN:
1806 B #(offset - 8)
1807 NOT_TAKEN:
1808
1809 */
1810
1811 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1812 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1813 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1814 }
1815 }
1816
1817 /* Implementation of aarch64_insn_visitor method "cb". */
1818
1819 static void
1820 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1821 const unsigned rn, int is64,
1822 struct aarch64_insn_data *data)
1823 {
1824 struct aarch64_insn_relocation_data *insn_reloc
1825 = (struct aarch64_insn_relocation_data *) data;
1826 int64_t new_offset
1827 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1828
1829 if (can_encode_int32 (new_offset, 21))
1830 {
1831 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1832 aarch64_register (rn, is64), new_offset);
1833 }
1834 else if (can_encode_int32 (new_offset, 28))
1835 {
1836 /* The offset is out of range for a compare and branch
1837 instruction but not for an unconditional branch. We can use
1838 the following instructions instead:
1839
1840 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1841 B NOT_TAKEN ; Else jump over TAKEN and continue.
1842 TAKEN:
1843 B #(offset - 8)
1844 NOT_TAKEN:
1845
1846 */
1847 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1848 aarch64_register (rn, is64), 8);
1849 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1850 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1851 }
1852 }
1853
1854 /* Implementation of aarch64_insn_visitor method "tb". */
1855
1856 static void
1857 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1858 const unsigned rt, unsigned bit,
1859 struct aarch64_insn_data *data)
1860 {
1861 struct aarch64_insn_relocation_data *insn_reloc
1862 = (struct aarch64_insn_relocation_data *) data;
1863 int64_t new_offset
1864 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1865
1866 if (can_encode_int32 (new_offset, 16))
1867 {
1868 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1869 aarch64_register (rt, 1), new_offset);
1870 }
1871 else if (can_encode_int32 (new_offset, 28))
1872 {
1873 /* The offset is out of range for a test bit and branch
1874 instruction but not for an unconditional branch. We can use
1875 the following instructions instead:
1876
1877 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1878 B NOT_TAKEN ; Else jump over TAKEN and continue.
1879 TAKEN:
1880 B #(offset - 8)
1881 NOT_TAKEN:
1882
1883 */
1884 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1885 aarch64_register (rt, 1), 8);
1886 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1887 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1888 new_offset - 8);
1889 }
1890 }
1891
1892 /* Implementation of aarch64_insn_visitor method "adr". */
1893
1894 static void
1895 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1896 const int is_adrp,
1897 struct aarch64_insn_data *data)
1898 {
1899 struct aarch64_insn_relocation_data *insn_reloc
1900 = (struct aarch64_insn_relocation_data *) data;
1901 /* We know exactly the address the ADR{P,} instruction will compute.
1902 We can just write it to the destination register. */
1903 CORE_ADDR address = data->insn_addr + offset;
1904
1905 if (is_adrp)
1906 {
1907 /* Clear the lower 12 bits of the address to get its 4K page base. */
1908 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1909 aarch64_register (rd, 1),
1910 address & ~0xfff);
1911 }
1912 else
1913 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1914 aarch64_register (rd, 1), address);
1915 }
1916
1917 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1918
1919 static void
1920 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1921 const unsigned rt, const int is64,
1922 struct aarch64_insn_data *data)
1923 {
1924 struct aarch64_insn_relocation_data *insn_reloc
1925 = (struct aarch64_insn_relocation_data *) data;
1926 CORE_ADDR address = data->insn_addr + offset;
1927
1928 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1929 aarch64_register (rt, 1), address);
1930
1931 /* We know exactly what address to load from, and what register we
1932 can use:
1933
1934 MOV xd, #(oldloc + offset)
1935 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1936 ...
1937
1938 LDR xd, [xd] ; or LDRSW xd, [xd]
1939
1940 */
1941
1942 if (is_sw)
1943 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1944 aarch64_register (rt, 1),
1945 aarch64_register (rt, 1),
1946 offset_memory_operand (0));
1947 else
1948 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1949 aarch64_register (rt, is64),
1950 aarch64_register (rt, 1),
1951 offset_memory_operand (0));
1952 }
1953
1954 /* Implementation of aarch64_insn_visitor method "others". */
1955
1956 static void
1957 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1958 struct aarch64_insn_data *data)
1959 {
1960 struct aarch64_insn_relocation_data *insn_reloc
1961 = (struct aarch64_insn_relocation_data *) data;
1962
1963 /* The instruction is not PC relative. Just re-emit it at the new
1964 location. */
1965 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1966 }
1967
1968 static const struct aarch64_insn_visitor visitor =
1969 {
1970 aarch64_ftrace_insn_reloc_b,
1971 aarch64_ftrace_insn_reloc_b_cond,
1972 aarch64_ftrace_insn_reloc_cb,
1973 aarch64_ftrace_insn_reloc_tb,
1974 aarch64_ftrace_insn_reloc_adr,
1975 aarch64_ftrace_insn_reloc_ldr_literal,
1976 aarch64_ftrace_insn_reloc_others,
1977 };
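/* Note (summary of the surrounding code, not additional behaviour):
   the visitor above is driven by aarch64_relocate_instruction
   (arch/aarch64-insn.c).  The instruction originally at the
   tracepoint address is decoded and the matching callback re-emits
   an equivalent sequence at the new location, adjusting any
   PC-relative offsets for the move.  */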
1978
1979 bool
1980 aarch64_target::supports_fast_tracepoints ()
1981 {
1982 return true;
1983 }
1984
1985 /* Implementation of target ops method
1986 "install_fast_tracepoint_jump_pad". */
1987
1988 int
1989 aarch64_target::install_fast_tracepoint_jump_pad
1990 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
1991 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
1992 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
1993 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
1994 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
1995 char *err)
1996 {
1997 uint32_t buf[256];
1998 uint32_t *p = buf;
1999 int64_t offset;
2000 int i;
2001 uint32_t insn;
2002 CORE_ADDR buildaddr = *jump_entry;
2003 struct aarch64_insn_relocation_data insn_data;
2004
2005 /* We need to save the current state on the stack both to restore it
2006 later and to collect register values when the tracepoint is hit.
2007
2008 The saved registers are pushed in a layout that needs to be in sync
2009 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2010 the supply_fast_tracepoint_registers function will fill in the
2011 register cache from a pointer to saved registers on the stack we build
2012 here.
2013
2014 For simplicity, we set the size of each cell on the stack to 16 bytes.
2015 This way one cell can hold any register type, from system registers
2016 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
2017 has to be 16-byte aligned anyway.
2018
2019 Note that the CPSR register does not exist on AArch64. Instead we
2020 can access the system bits describing the process state, such as
2021 the condition flags, with the MRS/MSR instructions. We save them as
2022 if they were part of a CPSR register because that's how GDB
2023 interprets these system bits. At the moment, only the condition
2024 flags are saved in CPSR (NZCV).
2025
2026 Stack layout, each cell is 16 bytes (descending):
2027
2028 High *-------- SIMD&FP registers from 31 down to 0. --------*
2029 | q31 |
2030 . .
2031 . . 32 cells
2032 . .
2033 | q0 |
2034 *---- General purpose registers from 30 down to 0. ----*
2035 | x30 |
2036 . .
2037 . . 31 cells
2038 . .
2039 | x0 |
2040 *------------- Special purpose registers. -------------*
2041 | SP |
2042 | PC |
2043 | CPSR (NZCV) | 5 cells
2044 | FPSR |
2045 | FPCR | <- SP + 16
2046 *------------- collecting_t object --------------------*
2047 | TPIDR_EL0 | struct tracepoint * |
2048 Low *------------------------------------------------------*
2049
2050 After this stack is set up, we issue a call to the collector, passing
2051 it the saved registers at (SP + 16). */
2052
2053 /* Push SIMD&FP registers on the stack:
2054
2055 SUB sp, sp, #(32 * 16)
2056
2057 STP q30, q31, [sp, #(30 * 16)]
2058 ...
2059 STP q0, q1, [sp]
2060
2061 */
2062 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2063 for (i = 30; i >= 0; i -= 2)
2064 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2065
2066 /* Push general purpose registers on the stack. Note that we do not need
2067 to push x31 as it represents the xzr register and not the stack
2068 pointer in a STR instruction.
2069
2070 SUB sp, sp, #(31 * 16)
2071
2072 STR x30, [sp, #(30 * 16)]
2073 ...
2074 STR x0, [sp]
2075
2076 */
2077 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2078 for (i = 30; i >= 0; i -= 1)
2079 p += emit_str (p, aarch64_register (i, 1), sp,
2080 offset_memory_operand (i * 16));
2081
2082 /* Make space for 5 more cells.
2083
2084 SUB sp, sp, #(5 * 16)
2085
2086 */
2087 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2088
2089
2090 /* Save SP:
2091
2092 ADD x4, sp, #((32 + 31 + 5) * 16)
2093 STR x4, [sp, #(4 * 16)]
2094
2095 */
2096 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2097 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2098
2099 /* Save PC (tracepoint address):
2100
2101 MOV x3, #(tpaddr)
2102 ...
2103
2104 STR x3, [sp, #(3 * 16)]
2105
2106 */
2107
2108 p += emit_mov_addr (p, x3, tpaddr);
2109 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2110
2111 /* Save CPSR (NZCV), FPSR and FPCR:
2112
2113 MRS x2, nzcv
2114 MRS x1, fpsr
2115 MRS x0, fpcr
2116
2117 STR x2, [sp, #(2 * 16)]
2118 STR x1, [sp, #(1 * 16)]
2119 STR x0, [sp, #(0 * 16)]
2120
2121 */
2122 p += emit_mrs (p, x2, NZCV);
2123 p += emit_mrs (p, x1, FPSR);
2124 p += emit_mrs (p, x0, FPCR);
2125 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2126 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2127 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2128
2129 /* Push the collecting_t object. It consists of the address of the
2130 tracepoint and an ID for the current thread. We get the latter by
2131 reading the tpidr_el0 system register. It corresponds to the
2132 NT_ARM_TLS register accessible with ptrace.
2133
2134 MOV x0, #(tpoint)
2135 ...
2136
2137 MRS x1, tpidr_el0
2138
2139 STP x0, x1, [sp, #-16]!
2140
2141 */
2142
2143 p += emit_mov_addr (p, x0, tpoint);
2144 p += emit_mrs (p, x1, TPIDR_EL0);
2145 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2146
2147 /* Spin-lock:
2148
2149 The shared memory for the lock is at lockaddr. It will hold zero
2150 if no-one is holding the lock, otherwise it contains the address of
2151 the collecting_t object on the stack of the thread which acquired it.
2152
2153 At this stage, the stack pointer points to this thread's collecting_t
2154 object.
2155
2156 We use the following registers:
2157 - x0: Address of the lock.
2158 - x1: Pointer to collecting_t object.
2159 - x2: Scratch register.
2160
2161 MOV x0, #(lockaddr)
2162 ...
2163 MOV x1, sp
2164
2165 ; Trigger an event local to this core. So the following WFE
2166 ; instruction is ignored.
2167 SEVL
2168 again:
2169 ; Wait for an event. The event is triggered by either the SEVL
2170 ; or STLR instructions (store release).
2171 WFE
2172
2173 ; Atomically read at lockaddr. This marks the memory location as
2174 ; exclusive. This instruction also has memory constraints which
2175 ; make sure all previous data reads and writes are done before
2176 ; executing it.
2177 LDAXR x2, [x0]
2178
2179 ; Try again if another thread holds the lock.
2180 CBNZ x2, again
2181
2182 ; We can lock it! Write the address of the collecting_t object.
2183 ; This instruction will fail if the memory location is not marked
2184 ; as exclusive anymore. If it succeeds, it will remove the
2185 ; exclusive mark on the memory location. This way, if another
2186 ; thread executes this instruction before us, we will fail and try
2187 ; all over again.
2188 STXR w2, x1, [x0]
2189 CBNZ w2, again
2190
2191 */
2192
2193 p += emit_mov_addr (p, x0, lockaddr);
2194 p += emit_mov (p, x1, register_operand (sp));
2195
2196 p += emit_sevl (p);
2197 p += emit_wfe (p);
2198 p += emit_ldaxr (p, x2, x0);
2199 p += emit_cb (p, 1, w2, -2 * 4);
2200 p += emit_stxr (p, w2, x1, x0);
2201 p += emit_cb (p, 1, x2, -4 * 4);
2202
2203 /* Call collector (struct tracepoint *, unsigned char *):
2204
2205 MOV x0, #(tpoint)
2206 ...
2207
2208 ; Saved registers start after the collecting_t object.
2209 ADD x1, sp, #16
2210
2211 ; We use an intra-procedure-call scratch register.
2212 MOV ip0, #(collector)
2213 ...
2214
2215 ; And call back to C!
2216 BLR ip0
2217
2218 */
2219
2220 p += emit_mov_addr (p, x0, tpoint);
2221 p += emit_add (p, x1, sp, immediate_operand (16));
2222
2223 p += emit_mov_addr (p, ip0, collector);
2224 p += emit_blr (p, ip0);
2225
2226 /* Release the lock.
2227
2228 MOV x0, #(lockaddr)
2229 ...
2230
2231 ; This instruction is a normal store with memory ordering
2232 ; constraints. Thanks to this we do not have to put a data
2233 ; barrier instruction to make sure all data reads and writes are done
2234 ; before this instruction is executed. Furthermore, this instruction
2235 ; will trigger an event, letting other threads know they can grab
2236 ; the lock.
2237 STLR xzr, [x0]
2238
2239 */
2240 p += emit_mov_addr (p, x0, lockaddr);
2241 p += emit_stlr (p, xzr, x0);
2242
2243 /* Free collecting_t object:
2244
2245 ADD sp, sp, #16
2246
2247 */
2248 p += emit_add (p, sp, sp, immediate_operand (16));
2249
2250 /* Restore CPSR (NZCV), FPSR and FPCR, then free the special purpose
2251 register cells from the stack.
2252
2253 LDR x2, [sp, #(2 * 16)]
2254 LDR x1, [sp, #(1 * 16)]
2255 LDR x0, [sp, #(0 * 16)]
2256
2257 MSR NZCV, x2
2258 MSR FPSR, x1
2259 MSR FPCR, x0
2260
2261 ADD sp, sp, #(5 * 16)
2262
2263 */
2264 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2265 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2266 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2267 p += emit_msr (p, NZCV, x2);
2268 p += emit_msr (p, FPSR, x1);
2269 p += emit_msr (p, FPCR, x0);
2270
2271 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2272
2273 /* Pop general purpose registers:
2274
2275 LDR x0, [sp]
2276 ...
2277 LDR x30, [sp, #(30 * 16)]
2278
2279 ADD sp, sp, #(31 * 16)
2280
2281 */
2282 for (i = 0; i <= 30; i += 1)
2283 p += emit_ldr (p, aarch64_register (i, 1), sp,
2284 offset_memory_operand (i * 16));
2285 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2286
2287 /* Pop SIMD&FP registers:
2288
2289 LDP q0, q1, [sp]
2290 ...
2291 LDP q30, q31, [sp, #(30 * 16)]
2292
2293 ADD sp, sp, #(32 * 16)
2294
2295 */
2296 for (i = 0; i <= 30; i += 2)
2297 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2298 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2299
2300 /* Write the code into the inferior memory. */
2301 append_insns (&buildaddr, p - buf, buf);
2302
2303 /* Now emit the relocated instruction. */
2304 *adjusted_insn_addr = buildaddr;
2305 target_read_uint32 (tpaddr, &insn);
2306
2307 insn_data.base.insn_addr = tpaddr;
2308 insn_data.new_addr = buildaddr;
2309 insn_data.insn_ptr = buf;
2310
2311 aarch64_relocate_instruction (insn, &visitor,
2312 (struct aarch64_insn_data *) &insn_data);
2313
2314 /* We may not have been able to relocate the instruction. */
2315 if (insn_data.insn_ptr == buf)
2316 {
2317 sprintf (err,
2318 "E.Could not relocate instruction from %s to %s.",
2319 core_addr_to_string_nz (tpaddr),
2320 core_addr_to_string_nz (buildaddr));
2321 return 1;
2322 }
2323 else
2324 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2325 *adjusted_insn_addr_end = buildaddr;
2326
2327 /* Go back to the start of the buffer. */
2328 p = buf;
2329
2330 /* Emit a branch back from the jump pad. */
2331 offset = (tpaddr + orig_size - buildaddr);
2332 if (!can_encode_int32 (offset, 28))
2333 {
2334 sprintf (err,
2335 "E.Jump back from jump pad too far from tracepoint "
2336 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2337 offset);
2338 return 1;
2339 }
2340
2341 p += emit_b (p, 0, offset);
2342 append_insns (&buildaddr, p - buf, buf);
2343
2344 /* Give the caller a branch instruction into the jump pad. */
2345 offset = (*jump_entry - tpaddr);
2346 if (!can_encode_int32 (offset, 28))
2347 {
2348 sprintf (err,
2349 "E.Jump pad too far from tracepoint "
2350 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2351 offset);
2352 return 1;
2353 }
2354
2355 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2356 *jjump_pad_insn_size = 4;
2357
2358 /* Return the end address of our pad. */
2359 *jump_entry = buildaddr;
2360
2361 return 0;
2362 }
2363
2364 /* Helper function writing LEN instructions from START into
2365 current_insn_ptr. */
2366
2367 static void
2368 emit_ops_insns (const uint32_t *start, int len)
2369 {
2370 CORE_ADDR buildaddr = current_insn_ptr;
2371
2372 if (debug_threads)
2373 debug_printf ("Adding %d instructions at %s\n",
2374 len, paddress (buildaddr));
2375
2376 append_insns (&buildaddr, len, start);
2377 current_insn_ptr = buildaddr;
2378 }
2379
2380 /* Pop a register from the stack. */
2381
2382 static int
2383 emit_pop (uint32_t *buf, struct aarch64_register rt)
2384 {
2385 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2386 }
2387
2388 /* Push a register on the stack. */
2389
2390 static int
2391 emit_push (uint32_t *buf, struct aarch64_register rt)
2392 {
2393 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2394 }
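/* Note: each cell on this expression stack is 16 bytes even though
   only 8 are used, so that SP always stays 16-byte aligned as the
   architecture requires.  */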
2395
2396 /* Implementation of emit_ops method "emit_prologue". */
2397
2398 static void
2399 aarch64_emit_prologue (void)
2400 {
2401 uint32_t buf[16];
2402 uint32_t *p = buf;
2403
2404 /* This function emits a prologue for the following function prototype:
2405
2406 enum eval_result_type f (unsigned char *regs,
2407 ULONGEST *value);
2408
2409 The first argument is a buffer of raw registers. The second
2410 argument points to where the result of evaluating the expression
2411 will be stored; it is set to whatever is on top of the stack at
2412 the end.
2413
2414 The stack set up by the prologue is as such:
2415
2416 High *------------------------------------------------------*
2417 | LR |
2418 | FP | <- FP
2419 | x1 (ULONGEST *value) |
2420 | x0 (unsigned char *regs) |
2421 Low *------------------------------------------------------*
2422
2423 As we are implementing a stack machine, each opcode can expand the
2424 stack so we never know how far we are from the data saved by this
2425 prologue. In order to be able to refer to value and regs later, we save
2426 the current stack pointer in the frame pointer. This way, it is not
2427 clobbered when calling C functions.
2428
2429 Finally, throughout every operation, we are using register x0 as the
2430 top of the stack, and x1 as a scratch register. */
2431
2432 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2433 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2434 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2435
2436 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2437
2438
2439 emit_ops_insns (buf, p - buf);
2440 }
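/* Illustrative only: with the convention above (x0 is the top of the
   stack, x1 is a scratch register), the generic bytecode compiler in
   tracepoint.c might lower a sequence such as "reg 0; const 4; add"
   roughly as:

     aarch64_emit_prologue ();
     aarch64_emit_reg (0);          ; x0 = value of raw register 0
     aarch64_emit_stack_flush ();   ; push x0 on the memory stack
     aarch64_emit_const (4);        ; x0 = 4
     aarch64_emit_add ();           ; pop x1; x0 = x1 + x0
     aarch64_emit_epilogue ();      ; *value = x0, return

   The exact sequence of emit_ops calls is decided by the generic
   compiler, so treat this as a sketch.  */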
2441
2442 /* Implementation of emit_ops method "emit_epilogue". */
2443
2444 static void
2445 aarch64_emit_epilogue (void)
2446 {
2447 uint32_t buf[16];
2448 uint32_t *p = buf;
2449
2450 /* Store the result of the expression (x0) in *value. */
2451 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2452 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2453 p += emit_str (p, x0, x1, offset_memory_operand (0));
2454
2455 /* Restore the previous state. */
2456 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2457 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2458
2459 /* Return expr_eval_no_error. */
2460 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2461 p += emit_ret (p, lr);
2462
2463 emit_ops_insns (buf, p - buf);
2464 }
2465
2466 /* Implementation of emit_ops method "emit_add". */
2467
2468 static void
2469 aarch64_emit_add (void)
2470 {
2471 uint32_t buf[16];
2472 uint32_t *p = buf;
2473
2474 p += emit_pop (p, x1);
2475 p += emit_add (p, x0, x1, register_operand (x0));
2476
2477 emit_ops_insns (buf, p - buf);
2478 }
2479
2480 /* Implementation of emit_ops method "emit_sub". */
2481
2482 static void
2483 aarch64_emit_sub (void)
2484 {
2485 uint32_t buf[16];
2486 uint32_t *p = buf;
2487
2488 p += emit_pop (p, x1);
2489 p += emit_sub (p, x0, x1, register_operand (x0));
2490
2491 emit_ops_insns (buf, p - buf);
2492 }
2493
2494 /* Implementation of emit_ops method "emit_mul". */
2495
2496 static void
2497 aarch64_emit_mul (void)
2498 {
2499 uint32_t buf[16];
2500 uint32_t *p = buf;
2501
2502 p += emit_pop (p, x1);
2503 p += emit_mul (p, x0, x1, x0);
2504
2505 emit_ops_insns (buf, p - buf);
2506 }
2507
2508 /* Implementation of emit_ops method "emit_lsh". */
2509
2510 static void
2511 aarch64_emit_lsh (void)
2512 {
2513 uint32_t buf[16];
2514 uint32_t *p = buf;
2515
2516 p += emit_pop (p, x1);
2517 p += emit_lslv (p, x0, x1, x0);
2518
2519 emit_ops_insns (buf, p - buf);
2520 }
2521
2522 /* Implementation of emit_ops method "emit_rsh_signed". */
2523
2524 static void
2525 aarch64_emit_rsh_signed (void)
2526 {
2527 uint32_t buf[16];
2528 uint32_t *p = buf;
2529
2530 p += emit_pop (p, x1);
2531 p += emit_asrv (p, x0, x1, x0);
2532
2533 emit_ops_insns (buf, p - buf);
2534 }
2535
2536 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2537
2538 static void
2539 aarch64_emit_rsh_unsigned (void)
2540 {
2541 uint32_t buf[16];
2542 uint32_t *p = buf;
2543
2544 p += emit_pop (p, x1);
2545 p += emit_lsrv (p, x0, x1, x0);
2546
2547 emit_ops_insns (buf, p - buf);
2548 }
2549
2550 /* Implementation of emit_ops method "emit_ext". */
2551
2552 static void
2553 aarch64_emit_ext (int arg)
2554 {
2555 uint32_t buf[16];
2556 uint32_t *p = buf;
2557
2558 p += emit_sbfx (p, x0, x0, 0, arg);
2559
2560 emit_ops_insns (buf, p - buf);
2561 }
2562
2563 /* Implementation of emit_ops method "emit_log_not". */
2564
2565 static void
2566 aarch64_emit_log_not (void)
2567 {
2568 uint32_t buf[16];
2569 uint32_t *p = buf;
2570
2571 /* If the top of the stack is 0, replace it with 1. Else replace it with
2572 0. */
2573
2574 p += emit_cmp (p, x0, immediate_operand (0));
2575 p += emit_cset (p, x0, EQ);
2576
2577 emit_ops_insns (buf, p - buf);
2578 }
2579
2580 /* Implementation of emit_ops method "emit_bit_and". */
2581
2582 static void
2583 aarch64_emit_bit_and (void)
2584 {
2585 uint32_t buf[16];
2586 uint32_t *p = buf;
2587
2588 p += emit_pop (p, x1);
2589 p += emit_and (p, x0, x0, x1);
2590
2591 emit_ops_insns (buf, p - buf);
2592 }
2593
2594 /* Implementation of emit_ops method "emit_bit_or". */
2595
2596 static void
2597 aarch64_emit_bit_or (void)
2598 {
2599 uint32_t buf[16];
2600 uint32_t *p = buf;
2601
2602 p += emit_pop (p, x1);
2603 p += emit_orr (p, x0, x0, x1);
2604
2605 emit_ops_insns (buf, p - buf);
2606 }
2607
2608 /* Implementation of emit_ops method "emit_bit_xor". */
2609
2610 static void
2611 aarch64_emit_bit_xor (void)
2612 {
2613 uint32_t buf[16];
2614 uint32_t *p = buf;
2615
2616 p += emit_pop (p, x1);
2617 p += emit_eor (p, x0, x0, x1);
2618
2619 emit_ops_insns (buf, p - buf);
2620 }
2621
2622 /* Implementation of emit_ops method "emit_bit_not". */
2623
2624 static void
2625 aarch64_emit_bit_not (void)
2626 {
2627 uint32_t buf[16];
2628 uint32_t *p = buf;
2629
2630 p += emit_mvn (p, x0, x0);
2631
2632 emit_ops_insns (buf, p - buf);
2633 }
2634
2635 /* Implementation of emit_ops method "emit_equal". */
2636
2637 static void
2638 aarch64_emit_equal (void)
2639 {
2640 uint32_t buf[16];
2641 uint32_t *p = buf;
2642
2643 p += emit_pop (p, x1);
2644 p += emit_cmp (p, x0, register_operand (x1));
2645 p += emit_cset (p, x0, EQ);
2646
2647 emit_ops_insns (buf, p - buf);
2648 }
2649
2650 /* Implementation of emit_ops method "emit_less_signed". */
2651
2652 static void
2653 aarch64_emit_less_signed (void)
2654 {
2655 uint32_t buf[16];
2656 uint32_t *p = buf;
2657
2658 p += emit_pop (p, x1);
2659 p += emit_cmp (p, x1, register_operand (x0));
2660 p += emit_cset (p, x0, LT);
2661
2662 emit_ops_insns (buf, p - buf);
2663 }
2664
2665 /* Implementation of emit_ops method "emit_less_unsigned". */
2666
2667 static void
2668 aarch64_emit_less_unsigned (void)
2669 {
2670 uint32_t buf[16];
2671 uint32_t *p = buf;
2672
2673 p += emit_pop (p, x1);
2674 p += emit_cmp (p, x1, register_operand (x0));
2675 p += emit_cset (p, x0, LO);
2676
2677 emit_ops_insns (buf, p - buf);
2678 }
2679
2680 /* Implementation of emit_ops method "emit_ref". */
2681
2682 static void
2683 aarch64_emit_ref (int size)
2684 {
2685 uint32_t buf[16];
2686 uint32_t *p = buf;
2687
2688 switch (size)
2689 {
2690 case 1:
2691 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2692 break;
2693 case 2:
2694 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2695 break;
2696 case 4:
2697 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2698 break;
2699 case 8:
2700 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2701 break;
2702 default:
2703 /* Unknown size, bail on compilation. */
2704 emit_error = 1;
2705 break;
2706 }
2707
2708 emit_ops_insns (buf, p - buf);
2709 }
2710
2711 /* Implementation of emit_ops method "emit_if_goto". */
2712
2713 static void
2714 aarch64_emit_if_goto (int *offset_p, int *size_p)
2715 {
2716 uint32_t buf[16];
2717 uint32_t *p = buf;
2718
2719 /* The Z flag is set or cleared here. */
2720 p += emit_cmp (p, x0, immediate_operand (0));
2721 /* This instruction must not change the Z flag. */
2722 p += emit_pop (p, x0);
2723 /* Branch over the next instruction if x0 == 0. */
2724 p += emit_bcond (p, EQ, 8);
2725
2726 /* The NOP instruction will be patched with an unconditional branch. */
2727 if (offset_p)
2728 *offset_p = (p - buf) * 4;
2729 if (size_p)
2730 *size_p = 4;
2731 p += emit_nop (p);
2732
2733 emit_ops_insns (buf, p - buf);
2734 }
2735
2736 /* Implementation of emit_ops method "emit_goto". */
2737
2738 static void
2739 aarch64_emit_goto (int *offset_p, int *size_p)
2740 {
2741 uint32_t buf[16];
2742 uint32_t *p = buf;
2743
2744 /* The NOP instruction will be patched with an unconditional branch. */
2745 if (offset_p)
2746 *offset_p = 0;
2747 if (size_p)
2748 *size_p = 4;
2749 p += emit_nop (p);
2750
2751 emit_ops_insns (buf, p - buf);
2752 }
2753
2754 /* Implementation of emit_ops method "write_goto_address". */
2755
2756 static void
2757 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2758 {
2759 uint32_t insn;
2760
2761 emit_b (&insn, 0, to - from);
2762 append_insns (&from, 1, &insn);
2763 }
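/* Taken together with aarch64_emit_if_goto and aarch64_emit_goto
   above: those methods emit a placeholder NOP and report its offset
   and size back to the bytecode compiler, which later calls
   write_goto_address with the resolved destination so the NOP is
   overwritten with an unconditional B from FROM to TO.  */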
2764
2765 /* Implementation of emit_ops method "emit_const". */
2766
2767 static void
2768 aarch64_emit_const (LONGEST num)
2769 {
2770 uint32_t buf[16];
2771 uint32_t *p = buf;
2772
2773 p += emit_mov_addr (p, x0, num);
2774
2775 emit_ops_insns (buf, p - buf);
2776 }
2777
2778 /* Implementation of emit_ops method "emit_call". */
2779
2780 static void
2781 aarch64_emit_call (CORE_ADDR fn)
2782 {
2783 uint32_t buf[16];
2784 uint32_t *p = buf;
2785
2786 p += emit_mov_addr (p, ip0, fn);
2787 p += emit_blr (p, ip0);
2788
2789 emit_ops_insns (buf, p - buf);
2790 }
2791
2792 /* Implementation of emit_ops method "emit_reg". */
2793
2794 static void
2795 aarch64_emit_reg (int reg)
2796 {
2797 uint32_t buf[16];
2798 uint32_t *p = buf;
2799
2800 /* Set x0 to unsigned char *regs. */
2801 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2802 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2803 p += emit_mov (p, x1, immediate_operand (reg));
2804
2805 emit_ops_insns (buf, p - buf);
2806
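/* get_raw_reg_func_addr () resolves to the in-process agent's
   get_raw_reg helper; x0 (the raw register buffer) and x1 (the
   register number) are set up as its two arguments, and its return
   value comes back in x0, i.e. on top of our stack.  */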
2807 aarch64_emit_call (get_raw_reg_func_addr ());
2808 }
2809
2810 /* Implementation of emit_ops method "emit_pop". */
2811
2812 static void
2813 aarch64_emit_pop (void)
2814 {
2815 uint32_t buf[16];
2816 uint32_t *p = buf;
2817
2818 p += emit_pop (p, x0);
2819
2820 emit_ops_insns (buf, p - buf);
2821 }
2822
2823 /* Implementation of emit_ops method "emit_stack_flush". */
2824
2825 static void
2826 aarch64_emit_stack_flush (void)
2827 {
2828 uint32_t buf[16];
2829 uint32_t *p = buf;
2830
2831 p += emit_push (p, x0);
2832
2833 emit_ops_insns (buf, p - buf);
2834 }
2835
2836 /* Implementation of emit_ops method "emit_zero_ext". */
2837
2838 static void
2839 aarch64_emit_zero_ext (int arg)
2840 {
2841 uint32_t buf[16];
2842 uint32_t *p = buf;
2843
2844 p += emit_ubfx (p, x0, x0, 0, arg);
2845
2846 emit_ops_insns (buf, p - buf);
2847 }
2848
2849 /* Implementation of emit_ops method "emit_swap". */
2850
2851 static void
2852 aarch64_emit_swap (void)
2853 {
2854 uint32_t buf[16];
2855 uint32_t *p = buf;
2856
2857 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2858 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2859 p += emit_mov (p, x0, register_operand (x1));
2860
2861 emit_ops_insns (buf, p - buf);
2862 }
2863
2864 /* Implementation of emit_ops method "emit_stack_adjust". */
2865
2866 static void
2867 aarch64_emit_stack_adjust (int n)
2868 {
2869 /* This is not needed with our design. */
2870 uint32_t buf[16];
2871 uint32_t *p = buf;
2872
2873 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2874
2875 emit_ops_insns (buf, p - buf);
2876 }
2877
2878 /* Implementation of emit_ops method "emit_int_call_1". */
2879
2880 static void
2881 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2882 {
2883 uint32_t buf[16];
2884 uint32_t *p = buf;
2885
2886 p += emit_mov (p, x0, immediate_operand (arg1));
2887
2888 emit_ops_insns (buf, p - buf);
2889
2890 aarch64_emit_call (fn);
2891 }
2892
2893 /* Implementation of emit_ops method "emit_void_call_2". */
2894
2895 static void
2896 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2897 {
2898 uint32_t buf[16];
2899 uint32_t *p = buf;
2900
2901 /* Push x0 on the stack. */
2902 aarch64_emit_stack_flush ();
2903
2904 /* Set up arguments for the function call:
2905
2906 x0: arg1
2907 x1: top of the stack
2908
2909 MOV x1, x0
2910 MOV x0, #arg1 */
2911
2912 p += emit_mov (p, x1, register_operand (x0));
2913 p += emit_mov (p, x0, immediate_operand (arg1));
2914
2915 emit_ops_insns (buf, p - buf);
2916
2917 aarch64_emit_call (fn);
2918
2919 /* Restore x0. */
2920 aarch64_emit_pop ();
2921 }
2922
2923 /* Implementation of emit_ops method "emit_eq_goto". */
2924
2925 static void
2926 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2927 {
2928 uint32_t buf[16];
2929 uint32_t *p = buf;
2930
2931 p += emit_pop (p, x1);
2932 p += emit_cmp (p, x1, register_operand (x0));
2933 /* Branch over the next instruction if x0 != x1. */
2934 p += emit_bcond (p, NE, 8);
2935 /* The NOP instruction will be patched with an unconditional branch. */
2936 if (offset_p)
2937 *offset_p = (p - buf) * 4;
2938 if (size_p)
2939 *size_p = 4;
2940 p += emit_nop (p);
2941
2942 emit_ops_insns (buf, p - buf);
2943 }
2944
2945 /* Implementation of emit_ops method "emit_ne_goto". */
2946
2947 static void
2948 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2949 {
2950 uint32_t buf[16];
2951 uint32_t *p = buf;
2952
2953 p += emit_pop (p, x1);
2954 p += emit_cmp (p, x1, register_operand (x0));
2955 /* Branch over the next instruction if x0 == x1. */
2956 p += emit_bcond (p, EQ, 8);
2957 /* The NOP instruction will be patched with an unconditional branch. */
2958 if (offset_p)
2959 *offset_p = (p - buf) * 4;
2960 if (size_p)
2961 *size_p = 4;
2962 p += emit_nop (p);
2963
2964 emit_ops_insns (buf, p - buf);
2965 }
2966
2967 /* Implementation of emit_ops method "emit_lt_goto". */
2968
2969 static void
2970 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2971 {
2972 uint32_t buf[16];
2973 uint32_t *p = buf;
2974
2975 p += emit_pop (p, x1);
2976 p += emit_cmp (p, x1, register_operand (x0));
2977 /* Branch over the next instruction if x0 >= x1. */
2978 p += emit_bcond (p, GE, 8);
2979 /* The NOP instruction will be patched with an unconditional branch. */
2980 if (offset_p)
2981 *offset_p = (p - buf) * 4;
2982 if (size_p)
2983 *size_p = 4;
2984 p += emit_nop (p);
2985
2986 emit_ops_insns (buf, p - buf);
2987 }
2988
2989 /* Implementation of emit_ops method "emit_le_goto". */
2990
2991 static void
2992 aarch64_emit_le_goto (int *offset_p, int *size_p)
2993 {
2994 uint32_t buf[16];
2995 uint32_t *p = buf;
2996
2997 p += emit_pop (p, x1);
2998 p += emit_cmp (p, x1, register_operand (x0));
2999 /* Branch over the next instruction if x0 > x1. */
3000 p += emit_bcond (p, GT, 8);
3001 /* The NOP instruction will be patched with an unconditional branch. */
3002 if (offset_p)
3003 *offset_p = (p - buf) * 4;
3004 if (size_p)
3005 *size_p = 4;
3006 p += emit_nop (p);
3007
3008 emit_ops_insns (buf, p - buf);
3009 }
3010
3011 /* Implementation of emit_ops method "emit_gt_goto". */
3012
3013 static void
3014 aarch64_emit_gt_goto (int *offset_p, int *size_p)
3015 {
3016 uint32_t buf[16];
3017 uint32_t *p = buf;
3018
3019 p += emit_pop (p, x1);
3020 p += emit_cmp (p, x1, register_operand (x0));
3021 /* Branch over the next instruction if x0 <= x1. */
3022 p += emit_bcond (p, LE, 8);
3023 /* The NOP instruction will be patched with an unconditional branch. */
3024 if (offset_p)
3025 *offset_p = (p - buf) * 4;
3026 if (size_p)
3027 *size_p = 4;
3028 p += emit_nop (p);
3029
3030 emit_ops_insns (buf, p - buf);
3031 }
3032
3033 /* Implementation of emit_ops method "emit_ge_goto". */
3034
3035 static void
3036 aarch64_emit_ge_got (int *offset_p, int *size_p)
3037 {
3038 uint32_t buf[16];
3039 uint32_t *p = buf;
3040
3041 p += emit_pop (p, x1);
3042 p += emit_cmp (p, x1, register_operand (x0));
3043 /* Branch over the next instruction if x0 < x1. */
3044 p += emit_bcond (p, LT, 8);
3045 /* The NOP instruction will be patched with an unconditional branch. */
3046 if (offset_p)
3047 *offset_p = (p - buf) * 4;
3048 if (size_p)
3049 *size_p = 4;
3050 p += emit_nop (p);
3051
3052 emit_ops_insns (buf, p - buf);
3053 }
3054
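/* The field order below must match struct emit_ops (see
   tracepoint.h); these callbacks are used by gdbserver's agent
   expression compiler to generate native code for tracepoint
   conditions on AArch64.  */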
3055 static struct emit_ops aarch64_emit_ops_impl =
3056 {
3057 aarch64_emit_prologue,
3058 aarch64_emit_epilogue,
3059 aarch64_emit_add,
3060 aarch64_emit_sub,
3061 aarch64_emit_mul,
3062 aarch64_emit_lsh,
3063 aarch64_emit_rsh_signed,
3064 aarch64_emit_rsh_unsigned,
3065 aarch64_emit_ext,
3066 aarch64_emit_log_not,
3067 aarch64_emit_bit_and,
3068 aarch64_emit_bit_or,
3069 aarch64_emit_bit_xor,
3070 aarch64_emit_bit_not,
3071 aarch64_emit_equal,
3072 aarch64_emit_less_signed,
3073 aarch64_emit_less_unsigned,
3074 aarch64_emit_ref,
3075 aarch64_emit_if_goto,
3076 aarch64_emit_goto,
3077 aarch64_write_goto_address,
3078 aarch64_emit_const,
3079 aarch64_emit_call,
3080 aarch64_emit_reg,
3081 aarch64_emit_pop,
3082 aarch64_emit_stack_flush,
3083 aarch64_emit_zero_ext,
3084 aarch64_emit_swap,
3085 aarch64_emit_stack_adjust,
3086 aarch64_emit_int_call_1,
3087 aarch64_emit_void_call_2,
3088 aarch64_emit_eq_goto,
3089 aarch64_emit_ne_goto,
3090 aarch64_emit_lt_goto,
3091 aarch64_emit_le_goto,
3092 aarch64_emit_gt_goto,
3093 aarch64_emit_ge_got,
3094 };
3095
3096 /* Implementation of target ops method "emit_ops". */
3097
3098 emit_ops *
3099 aarch64_target::emit_ops ()
3100 {
3101 return &aarch64_emit_ops_impl;
3102 }
3103
3104 /* Implementation of target ops method
3105 "get_min_fast_tracepoint_insn_len". */
3106
3107 int
3108 aarch64_target::get_min_fast_tracepoint_insn_len ()
3109 {
3110 return 4;
3111 }
3112
3113 /* Implementation of linux_target_ops method "supports_range_stepping". */
3114
3115 static int
3116 aarch64_supports_range_stepping (void)
3117 {
3118 return 1;
3119 }
3120
3121 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3122
3123 const gdb_byte *
3124 aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3125 {
3126 if (is_64bit_tdesc ())
3127 {
3128 *size = aarch64_breakpoint_len;
3129 return aarch64_breakpoint;
3130 }
3131 else
3132 return arm_sw_breakpoint_from_kind (kind, size);
3133 }
3134
3135 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3136
3137 int
3138 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3139 {
3140 if (is_64bit_tdesc ())
3141 return aarch64_breakpoint_len;
3142 else
3143 return arm_breakpoint_kind_from_pc (pcptr);
3144 }
3145
3146 /* Implementation of the target ops method
3147 "breakpoint_kind_from_current_state". */
3148
3149 int
3150 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3151 {
3152 if (is_64bit_tdesc ())
3153 return aarch64_breakpoint_len;
3154 else
3155 return arm_breakpoint_kind_from_current_state (pcptr);
3156 }
3157
3158 /* Support for hardware single step. */
3159
3160 static int
3161 aarch64_supports_hardware_single_step (void)
3162 {
3163 return 1;
3164 }
3165
3166 struct linux_target_ops the_low_target =
3167 {
3168 aarch64_supports_range_stepping,
3169 aarch64_supports_hardware_single_step,
3170 aarch64_get_syscall_trapinfo,
3171 };
3172
3173 /* The linux target ops object. */
3174
3175 linux_process_target *the_linux_target = &the_aarch64_target;
3176
3177 void
3178 initialize_low_arch (void)
3179 {
3180 initialize_low_arch_aarch32 ();
3181
3182 initialize_regsets_info (&aarch64_regsets_info);
3183 initialize_regsets_info (&aarch64_sve_regsets_info);
3184 }