AArch64: Add pointer authentication feature
binutils-gdb.git: gdb/gdbserver/linux-aarch64-low.c
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2019 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31
32 #include <signal.h>
33 #include <sys/user.h>
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
36 #include <inttypes.h>
37 #include <endian.h>
38 #include <sys/uio.h>
39
40 #include "gdb_proc_service.h"
41 #include "arch/aarch64.h"
42 #include "linux-aarch64-tdesc.h"
43 #include "nat/aarch64-sve-linux-ptrace.h"
44 #include "tdesc.h"
45
46 #ifdef HAVE_SYS_REG_H
47 #include <sys/reg.h>
48 #endif
49
50 /* Per-process arch-specific data we want to keep. */
51
52 struct arch_process_info
53 {
54 /* Hardware breakpoint/watchpoint data.
55 The reason for them to be per-process rather than per-thread is
56 due to the lack of information in the gdbserver environment;
57 gdbserver is not told whether a requested hardware
58 breakpoint/watchpoint is thread specific or not, so it has to set
59 each hw bp/wp for every thread in the current process. The
60 higher level bp/wp management in gdb will resume a thread if a hw
61 bp/wp trap is not expected for it. Since the hw bp/wp setting
62 is the same for each thread, it is reasonable for the data to live here.
63 */
64 struct aarch64_debug_reg_state debug_reg_state;
65 };
66
67 /* Return true if the size of register 0 is 8 bytes. */
68
69 static int
70 is_64bit_tdesc (void)
71 {
72 struct regcache *regcache = get_thread_regcache (current_thread, 0);
73
74 return register_size (regcache->tdesc, 0) == 8;
75 }
76
77 /* Return true if the regcache's register count matches that of an SVE
target description. */
78
79 static bool
80 is_sve_tdesc (void)
81 {
82 struct regcache *regcache = get_thread_regcache (current_thread, 0);
83
84 return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
85 }
86
87 static void
88 aarch64_fill_gregset (struct regcache *regcache, void *buf)
89 {
90 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
91 int i;
92
93 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
94 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
95 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
96 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
97 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
98 }
99
100 static void
101 aarch64_store_gregset (struct regcache *regcache, const void *buf)
102 {
103 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
104 int i;
105
106 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
107 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
108 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
109 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
110 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
111 }
112
113 static void
114 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
115 {
116 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
117 int i;
118
119 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
120 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
121 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
122 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
123 }
124
125 static void
126 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
127 {
128 const struct user_fpsimd_state *regset
129 = (const struct user_fpsimd_state *) buf;
130 int i;
131
132 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
133 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
134 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
135 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
136 }
137
138 /* Enable miscellaneous debugging output. The name is historical - it
139 was originally used to debug LinuxThreads support. */
140 extern int debug_threads;
141
142 /* Implementation of linux_target_ops method "get_pc". */
143
144 static CORE_ADDR
145 aarch64_get_pc (struct regcache *regcache)
146 {
147 if (register_size (regcache->tdesc, 0) == 8)
148 return linux_get_pc_64bit (regcache);
149 else
150 return linux_get_pc_32bit (regcache);
151 }
152
153 /* Implementation of linux_target_ops method "set_pc". */
154
155 static void
156 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
157 {
158 if (register_size (regcache->tdesc, 0) == 8)
159 linux_set_pc_64bit (regcache, pc);
160 else
161 linux_set_pc_32bit (regcache, pc);
162 }
163
164 #define aarch64_breakpoint_len 4
165
166 /* AArch64 BRK software debug mode instruction.
167 This instruction needs to match gdb/aarch64-tdep.c
168 (aarch64_default_breakpoint). */
169 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
170
171 /* Implementation of linux_target_ops method "breakpoint_at". */
172
173 static int
174 aarch64_breakpoint_at (CORE_ADDR where)
175 {
176 if (is_64bit_tdesc ())
177 {
178 gdb_byte insn[aarch64_breakpoint_len];
179
180 (*the_target->read_memory) (where, (unsigned char *) &insn,
181 aarch64_breakpoint_len);
182 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
183 return 1;
184
185 return 0;
186 }
187 else
188 return arm_breakpoint_at (where);
189 }
190
191 static void
192 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
193 {
194 int i;
195
196 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
197 {
198 state->dr_addr_bp[i] = 0;
199 state->dr_ctrl_bp[i] = 0;
200 state->dr_ref_count_bp[i] = 0;
201 }
202
203 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
204 {
205 state->dr_addr_wp[i] = 0;
206 state->dr_ctrl_wp[i] = 0;
207 state->dr_ref_count_wp[i] = 0;
208 }
209 }
210
211 /* Return the pointer to the debug register state structure in the
212 current process' arch-specific data area. */
213
214 struct aarch64_debug_reg_state *
215 aarch64_get_debug_reg_state (pid_t pid)
216 {
217 struct process_info *proc = find_process_pid (pid);
218
219 return &proc->priv->arch_private->debug_reg_state;
220 }
221
222 /* Implementation of linux_target_ops method "supports_z_point_type". */
223
224 static int
225 aarch64_supports_z_point_type (char z_type)
226 {
227 switch (z_type)
228 {
229 case Z_PACKET_SW_BP:
230 case Z_PACKET_HW_BP:
231 case Z_PACKET_WRITE_WP:
232 case Z_PACKET_READ_WP:
233 case Z_PACKET_ACCESS_WP:
234 return 1;
235 default:
236 return 0;
237 }
238 }
239
240 /* Implementation of linux_target_ops method "insert_point".
241
242 It actually only records the info of the to-be-inserted bp/wp;
243 the actual insertion will happen when threads are resumed. */
244
245 static int
246 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
247 int len, struct raw_breakpoint *bp)
248 {
249 int ret;
250 enum target_hw_bp_type targ_type;
251 struct aarch64_debug_reg_state *state
252 = aarch64_get_debug_reg_state (pid_of (current_thread));
253
254 if (show_debug_regs)
255 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
256 (unsigned long) addr, len);
257
258 /* Determine the type from the raw breakpoint type. */
259 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
260
261 if (targ_type != hw_execute)
262 {
263 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
264 ret = aarch64_handle_watchpoint (targ_type, addr, len,
265 1 /* is_insert */, state);
266 else
267 ret = -1;
268 }
269 else
270 {
271 if (len == 3)
272 {
273 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
274 instruction. Set it to 2 to correctly encode the length bit
275 mask in the hardware breakpoint/watchpoint control register. */
276 len = 2;
277 }
278 ret = aarch64_handle_breakpoint (targ_type, addr, len,
279 1 /* is_insert */, state);
280 }
281
282 if (show_debug_regs)
283 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
284 targ_type);
285
286 return ret;
287 }
288
289 /* Implementation of linux_target_ops method "remove_point".
290
291 It actually only records the info of the to-be-removed bp/wp;
292 the actual removal will be done when threads are resumed. */
293
294 static int
295 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
296 int len, struct raw_breakpoint *bp)
297 {
298 int ret;
299 enum target_hw_bp_type targ_type;
300 struct aarch64_debug_reg_state *state
301 = aarch64_get_debug_reg_state (pid_of (current_thread));
302
303 if (show_debug_regs)
304 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
305 (unsigned long) addr, len);
306
307 /* Determine the type from the raw breakpoint type. */
308 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
309
310 /* Handle watchpoints and breakpoints separately. */
311 if (targ_type != hw_execute)
312 ret =
313 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
314 state);
315 else
316 {
317 if (len == 3)
318 {
319 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
320 instruction. Set it to 2 to correctly encode the length bit
321 mask in the hardware breakpoint/watchpoint control register. */
322 len = 2;
323 }
324 ret = aarch64_handle_breakpoint (targ_type, addr, len,
325 0 /* is_insert */, state);
326 }
327
328 if (show_debug_regs)
329 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
330 targ_type);
331
332 return ret;
333 }
334
335 /* Implementation of linux_target_ops method "stopped_data_address". */
336
337 static CORE_ADDR
338 aarch64_stopped_data_address (void)
339 {
340 siginfo_t siginfo;
341 int pid, i;
342 struct aarch64_debug_reg_state *state;
343
344 pid = lwpid_of (current_thread);
345
346 /* Get the siginfo. */
347 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
348 return (CORE_ADDR) 0;
349
350 /* The signal must be from a hardware breakpoint/watchpoint trap. */
351 if (siginfo.si_signo != SIGTRAP
352 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
353 return (CORE_ADDR) 0;
354
355 /* Check if the address matches any watched address. */
356 state = aarch64_get_debug_reg_state (pid_of (current_thread));
357 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
358 {
359 const unsigned int offset
360 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
361 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
362 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
363 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
364 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
365 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
366
367 if (state->dr_ref_count_wp[i]
368 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
369 && addr_trap >= addr_watch_aligned
370 && addr_trap < addr_watch + len)
371 {
372 /* ADDR_TRAP reports the first address of the memory range
373 accessed by the CPU, regardless of what was the memory
374 range watched. Thus, a large CPU access that straddles
375 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
376 ADDR_TRAP that is lower than the
377 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
378
379 addr: | 4 | 5 | 6 | 7 | 8 |
380 |---- range watched ----|
381 |----------- range accessed ------------|
382
383 In this case, ADDR_TRAP will be 4.
384
385 To match a watchpoint known to GDB core, we must never
386 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
387 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
388 positive on kernels older than 4.10. See PR
389 external/20207. */
390 return addr_orig;
391 }
392 }
393
394 return (CORE_ADDR) 0;
395 }
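/* Editor's illustration (not in the original source): a worked case of
   the aligned-range test above. Suppose DR_ADDR_WP[i] is 0x1000 with
   OFFSET 6 and LEN 4, so ADDR_WATCH == 0x1006 and ADDR_WATCH_ALIGNED
   == 0x1000. An 8-byte store at 0x1000 straddles the watched range
   and the CPU reports ADDR_TRAP == 0x1000; the test accepts it since
   0x1000 >= 0x1000 and 0x1000 < 0x100a, and ADDR_ORIG is returned so
   the reported address matches what GDB core asked to watch. */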
396
397 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
398
399 static int
400 aarch64_stopped_by_watchpoint (void)
401 {
402 if (aarch64_stopped_data_address () != 0)
403 return 1;
404 else
405 return 0;
406 }
407
408 /* Fetch the thread-local storage pointer for libthread_db. */
409
410 ps_err_e
411 ps_get_thread_area (struct ps_prochandle *ph,
412 lwpid_t lwpid, int idx, void **base)
413 {
414 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
415 is_64bit_tdesc ());
416 }
417
418 /* Implementation of linux_target_ops method "siginfo_fixup". */
419
420 static int
421 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
422 {
423 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
424 if (!is_64bit_tdesc ())
425 {
426 if (direction == 0)
427 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
428 native);
429 else
430 aarch64_siginfo_from_compat_siginfo (native,
431 (struct compat_siginfo *) inf);
432
433 return 1;
434 }
435
436 return 0;
437 }
438
439 /* Implementation of linux_target_ops method "new_process". */
440
441 static struct arch_process_info *
442 aarch64_linux_new_process (void)
443 {
444 struct arch_process_info *info = XCNEW (struct arch_process_info);
445
446 aarch64_init_debug_reg_state (&info->debug_reg_state);
447
448 return info;
449 }
450
451 /* Implementation of linux_target_ops method "delete_process". */
452
453 static void
454 aarch64_linux_delete_process (struct arch_process_info *info)
455 {
456 xfree (info);
457 }
458
459 /* Implementation of linux_target_ops method "linux_new_fork". */
460
461 static void
462 aarch64_linux_new_fork (struct process_info *parent,
463 struct process_info *child)
464 {
465 /* These are allocated by linux_add_process. */
466 gdb_assert (parent->priv != NULL
467 && parent->priv->arch_private != NULL);
468 gdb_assert (child->priv != NULL
469 && child->priv->arch_private != NULL);
470
471 /* Linux kernels before 2.6.33 commit
472 72f674d203cd230426437cdcf7dd6f681dad8b0d
473 make the child inherit the hardware debug registers from the
474 parent on fork/vfork/clone. Newer Linux kernels create such
475 tasks with zeroed debug registers.
476
477 GDB core assumes the child inherits the watchpoints/hw
478 breakpoints of the parent, and will remove them all from the
479 forked off process. Copy the debug register mirrors into the
480 new process so that all breakpoints and watchpoints can be
481 removed together. The debug register mirrors will be zeroed
482 in the end before detaching the forked off process, thus making
483 this compatible with older Linux kernels too. */
484
485 *child->priv->arch_private = *parent->priv->arch_private;
486 }
487
488 /* Implementation of linux_target_ops method "arch_setup". */
489
490 static void
491 aarch64_arch_setup (void)
492 {
493 unsigned int machine;
494 int is_elf64;
495 int tid;
496
497 tid = lwpid_of (current_thread);
498
499 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
500
501 if (is_elf64)
502 {
503 uint64_t vq = aarch64_sve_get_vq (tid);
504 /* pauth not yet supported. */
505 current_process ()->tdesc = aarch64_linux_read_description (vq, false);
506 }
507 else
508 current_process ()->tdesc = tdesc_arm_with_neon;
509
510 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
511 }
512
513 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
514
515 static void
516 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
517 {
518 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
519 }
520
521 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
522
523 static void
524 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
525 {
526 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
527 }
528
529 static struct regset_info aarch64_regsets[] =
530 {
531 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
532 sizeof (struct user_pt_regs), GENERAL_REGS,
533 aarch64_fill_gregset, aarch64_store_gregset },
534 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
535 sizeof (struct user_fpsimd_state), FP_REGS,
536 aarch64_fill_fpregset, aarch64_store_fpregset
537 },
538 NULL_REGSET
539 };
540
541 static struct regsets_info aarch64_regsets_info =
542 {
543 aarch64_regsets, /* regsets */
544 0, /* num_regsets */
545 NULL, /* disabled_regsets */
546 };
547
548 static struct regs_info regs_info_aarch64 =
549 {
550 NULL, /* regset_bitmap */
551 NULL, /* usrregs */
552 &aarch64_regsets_info,
553 };
554
555 static struct regset_info aarch64_sve_regsets[] =
556 {
557 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
558 sizeof (struct user_pt_regs), GENERAL_REGS,
559 aarch64_fill_gregset, aarch64_store_gregset },
560 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
561 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
562 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
563 },
564 NULL_REGSET
565 };
566
567 static struct regsets_info aarch64_sve_regsets_info =
568 {
569 aarch64_sve_regsets, /* regsets. */
570 0, /* num_regsets. */
571 NULL, /* disabled_regsets. */
572 };
573
574 static struct regs_info regs_info_aarch64_sve =
575 {
576 NULL, /* regset_bitmap. */
577 NULL, /* usrregs. */
578 &aarch64_sve_regsets_info,
579 };
580
581 /* Implementation of linux_target_ops method "regs_info". */
582
583 static const struct regs_info *
584 aarch64_regs_info (void)
585 {
586 if (!is_64bit_tdesc ())
587 return &regs_info_aarch32;
588
589 if (is_sve_tdesc ())
590 return &regs_info_aarch64_sve;
591
592 return &regs_info_aarch64;
593 }
594
595 /* Implementation of linux_target_ops method "supports_tracepoints". */
596
597 static int
598 aarch64_supports_tracepoints (void)
599 {
600 if (current_thread == NULL)
601 return 1;
602 else
603 {
604 /* We don't support tracepoints on aarch32 now. */
605 return is_64bit_tdesc ();
606 }
607 }
608
609 /* Implementation of linux_target_ops method "get_thread_area". */
610
611 static int
612 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
613 {
614 struct iovec iovec;
615 uint64_t reg;
616
617 iovec.iov_base = &reg;
618 iovec.iov_len = sizeof (reg);
619
620 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
621 return -1;
622
623 *addrp = reg;
624
625 return 0;
626 }
627
628 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
629
630 static void
631 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
632 {
633 int use_64bit = register_size (regcache->tdesc, 0) == 8;
634
635 if (use_64bit)
636 {
637 long l_sysno;
638
639 collect_register_by_name (regcache, "x8", &l_sysno);
640 *sysno = (int) l_sysno;
641 }
642 else
643 collect_register_by_name (regcache, "r7", sysno);
644 }
645
646 /* List of condition codes that we need. */
647
648 enum aarch64_condition_codes
649 {
650 EQ = 0x0,
651 NE = 0x1,
652 LO = 0x3,
653 GE = 0xa,
654 LT = 0xb,
655 GT = 0xc,
656 LE = 0xd,
657 };
658
659 enum aarch64_operand_type
660 {
661 OPERAND_IMMEDIATE,
662 OPERAND_REGISTER,
663 };
664
665 /* Representation of an operand. At this time, it only supports register
666 and immediate types. */
667
668 struct aarch64_operand
669 {
670 /* Type of the operand. */
671 enum aarch64_operand_type type;
672
673 /* Value of the operand according to the type. */
674 union
675 {
676 uint32_t imm;
677 struct aarch64_register reg;
678 };
679 };
680
681 /* List of registers that we are currently using, we can add more here as
682 we need to use them. */
683
684 /* General purpose scratch registers (64 bit). */
685 static const struct aarch64_register x0 = { 0, 1 };
686 static const struct aarch64_register x1 = { 1, 1 };
687 static const struct aarch64_register x2 = { 2, 1 };
688 static const struct aarch64_register x3 = { 3, 1 };
689 static const struct aarch64_register x4 = { 4, 1 };
690
691 /* General purpose scratch registers (32 bit). */
692 static const struct aarch64_register w0 = { 0, 0 };
693 static const struct aarch64_register w2 = { 2, 0 };
694
695 /* Intra-procedure-call scratch register. */
696 static const struct aarch64_register ip0 = { 16, 1 };
697
698 /* Special purpose registers. */
699 static const struct aarch64_register fp = { 29, 1 };
700 static const struct aarch64_register lr = { 30, 1 };
701 static const struct aarch64_register sp = { 31, 1 };
702 static const struct aarch64_register xzr = { 31, 1 };
703
704 /* Build a register descriptor at run time. If we know the register
705 statically, we should make it a global as above instead of using this
706 helper function. */
707
708 static struct aarch64_register
709 aarch64_register (unsigned num, int is64)
710 {
711 return (struct aarch64_register) { num, is64 };
712 }
713
714 /* Helper function to create a register operand, for instructions with
715 different types of operands.
716
717 For example:
718 p += emit_mov (p, x0, register_operand (x1)); */
719
720 static struct aarch64_operand
721 register_operand (struct aarch64_register reg)
722 {
723 struct aarch64_operand operand;
724
725 operand.type = OPERAND_REGISTER;
726 operand.reg = reg;
727
728 return operand;
729 }
730
731 /* Helper function to create an immediate operand, for instructions with
732 different types of operands.
733
734 For example:
735 p += emit_mov (p, x0, immediate_operand (12)); */
736
737 static struct aarch64_operand
738 immediate_operand (uint32_t imm)
739 {
740 struct aarch64_operand operand;
741
742 operand.type = OPERAND_IMMEDIATE;
743 operand.imm = imm;
744
745 return operand;
746 }
747
748 /* Helper function to create an offset memory operand.
749
750 For example:
751 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
752
753 static struct aarch64_memory_operand
754 offset_memory_operand (int32_t offset)
755 {
756 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
757 }
758
759 /* Helper function to create a pre-index memory operand.
760
761 For example:
762 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
763
764 static struct aarch64_memory_operand
765 preindex_memory_operand (int32_t index)
766 {
767 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
768 }
769
770 /* Helper function to create a post-index memory operand.
771
772 For example:
773 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
774
775 static struct aarch64_memory_operand
776 postindex_memory_operand (int32_t index)
777 {
778 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
779 }
780
781 /* System control registers. These special registers can be read
782 with the MRS instruction and written with the MSR instruction.
783
784 - NZCV: Condition flags. GDB refers to this register under the CPSR
785 name.
786 - FPSR: Floating-point status register.
787 - FPCR: Floating-point control register.
788 - TPIDR_EL0: Software thread ID register. */
789
790 enum aarch64_system_control_registers
791 {
792 /* op0 op1 crn crm op2 */
793 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
794 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
795 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
796 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
797 };
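/* Editor's illustration (not in the original source): the enum values
   above pack the o0:op1:CRn:CRm:op2 system-register fields into a
   15-bit value that emit_mrs/emit_msr below shift into bits 19..5 of
   the instruction. For example, NZCV (S3_3_C4_C2_0) is

   (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0 == 0x5a10

   and, assuming the architectural MRS base opcode 0xd5300000,
   "MRS x2, nzcv" assembles to 0xd5300000 | (0x5a10 << 5) | 2
   == 0xd53b4202. */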
798
799 /* Write a BLR instruction into *BUF.
800
801 BLR rn
802
803 RN is the register to branch to. */
804
805 static int
806 emit_blr (uint32_t *buf, struct aarch64_register rn)
807 {
808 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
809 }
810
811 /* Write a RET instruction into *BUF.
812
813 RET xn
814
815 RN is the register to branch to. */
816
817 static int
818 emit_ret (uint32_t *buf, struct aarch64_register rn)
819 {
820 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
821 }
822
823 static int
824 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
825 struct aarch64_register rt,
826 struct aarch64_register rt2,
827 struct aarch64_register rn,
828 struct aarch64_memory_operand operand)
829 {
830 uint32_t opc;
831 uint32_t pre_index;
832 uint32_t write_back;
833
834 if (rt.is64)
835 opc = ENCODE (2, 2, 30);
836 else
837 opc = ENCODE (0, 2, 30);
838
839 switch (operand.type)
840 {
841 case MEMORY_OPERAND_OFFSET:
842 {
843 pre_index = ENCODE (1, 1, 24);
844 write_back = ENCODE (0, 1, 23);
845 break;
846 }
847 case MEMORY_OPERAND_POSTINDEX:
848 {
849 pre_index = ENCODE (0, 1, 24);
850 write_back = ENCODE (1, 1, 23);
851 break;
852 }
853 case MEMORY_OPERAND_PREINDEX:
854 {
855 pre_index = ENCODE (1, 1, 24);
856 write_back = ENCODE (1, 1, 23);
857 break;
858 }
859 default:
860 return 0;
861 }
862
863 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
864 | ENCODE (operand.index >> 3, 7, 15)
865 | ENCODE (rt2.num, 5, 10)
866 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
867 }
868
869 /* Write a STP instruction into *BUF.
870
871 STP rt, rt2, [rn, #offset]
872 STP rt, rt2, [rn, #index]!
873 STP rt, rt2, [rn], #index
874
875 RT and RT2 are the registers to store.
876 RN is the base address register.
877 OFFSET is the immediate to add to the base address. It is limited to a
878 -512 .. 504 range (7 bits << 3). */
879
880 static int
881 emit_stp (uint32_t *buf, struct aarch64_register rt,
882 struct aarch64_register rt2, struct aarch64_register rn,
883 struct aarch64_memory_operand operand)
884 {
885 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
886 }
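/* Editor's illustration (not in the original source): the jump pad
   code further down pushes the collecting_t object with
   emit_stp (p, x0, x1, sp, preindex_memory_operand (-16)), i.e.
   "STP x0, x1, [sp, #-16]!". In emit_load_store_pair this selects
   opc == 2 (64-bit), pre_index == 1, write_back == 1 and an offset
   field of -16 >> 3 == -2, which should assemble to 0xa9bf07e0. */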
887
888 /* Write a LDP instruction into *BUF.
889
890 LDP rt, rt2, [rn, #offset]
891 LDP rt, rt2, [rn, #index]!
892 LDP rt, rt2, [rn], #index
893
894 RT and RT2 are the registers to load.
895 RN is the base address register.
896 OFFSET is the immediate to add to the base address. It is limited to a
897 -512 .. 504 range (7 bits << 3). */
898
899 static int
900 emit_ldp (uint32_t *buf, struct aarch64_register rt,
901 struct aarch64_register rt2, struct aarch64_register rn,
902 struct aarch64_memory_operand operand)
903 {
904 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
905 }
906
907 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
908
909 LDP qt, qt2, [rn, #offset]
910
911 RT and RT2 are the Q registers to load.
912 RN is the base address register.
913 OFFSET is the immediate to add to the base address. It is limited to
914 -1024 .. 1008 range (7 bits << 4). */
915
916 static int
917 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
918 struct aarch64_register rn, int32_t offset)
919 {
920 uint32_t opc = ENCODE (2, 2, 30);
921 uint32_t pre_index = ENCODE (1, 1, 24);
922
923 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
924 | ENCODE (offset >> 4, 7, 15)
925 | ENCODE (rt2, 5, 10)
926 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
927 }
928
929 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
930
931 STP qt, qt2, [rn, #offset]
932
933 RT and RT2 are the Q registers to store.
934 RN is the base address register.
935 OFFSET is the immediate to add to the base address. It is limited to
936 -1024 .. 1008 range (7 bits << 4). */
937
938 static int
939 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
940 struct aarch64_register rn, int32_t offset)
941 {
942 uint32_t opc = ENCODE (2, 2, 30);
943 uint32_t pre_index = ENCODE (1, 1, 24);
944
945 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
946 | ENCODE (offset >> 4, 7, 15)
947 | ENCODE (rt2, 5, 10)
948 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
949 }
950
951 /* Write a LDRH instruction into *BUF.
952
953 LDRH wt, [xn, #offset]
954 LDRH wt, [xn, #index]!
955 LDRH wt, [xn], #index
956
957 RT is the register to load.
958 RN is the base address register.
959 OFFSET is the immediate to add to the base address. It is limited to
960 0 .. 8190 range (12 bits << 1). */
961
962 static int
963 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
964 struct aarch64_register rn,
965 struct aarch64_memory_operand operand)
966 {
967 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
968 }
969
970 /* Write a LDRB instruction into *BUF.
971
972 LDRB wt, [xn, #offset]
973 LDRB wt, [xn, #index]!
974 LDRB wt, [xn], #index
975
976 RT is the register to load.
977 RN is the base address register.
978 OFFSET is the immediate to add to the base address. It is limited to
979 0 .. 4095 range (12 bits). */
980
981 static int
982 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
983 struct aarch64_register rn,
984 struct aarch64_memory_operand operand)
985 {
986 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
987 }
988
989
990
991 /* Write a STR instruction into *BUF.
992
993 STR rt, [rn, #offset]
994 STR rt, [rn, #index]!
995 STR rt, [rn], #index
996
997 RT is the register to store.
998 RN is the base address register.
999 OFFSET is the immediate to add to the base address. It is limited to
1000 0 .. 32760 range (12 bits << 3). */
1001
1002 static int
1003 emit_str (uint32_t *buf, struct aarch64_register rt,
1004 struct aarch64_register rn,
1005 struct aarch64_memory_operand operand)
1006 {
1007 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1008 }
1009
1010 /* Helper function emitting an exclusive load or store instruction. */
1011
1012 static int
1013 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1014 enum aarch64_opcodes opcode,
1015 struct aarch64_register rs,
1016 struct aarch64_register rt,
1017 struct aarch64_register rt2,
1018 struct aarch64_register rn)
1019 {
1020 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1021 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1022 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1023 }
1024
1025 /* Write a LDAXR instruction into *BUF.
1026
1027 LDAXR rt, [xn]
1028
1029 RT is the destination register.
1030 RN is the base address register. */
1031
1032 static int
1033 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1034 struct aarch64_register rn)
1035 {
1036 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1037 xzr, rn);
1038 }
1039
1040 /* Write a STXR instruction into *BUF.
1041
1042 STXR ws, rt, [xn]
1043
1044 RS is the status register; it indicates whether the store succeeded.
1045 RT is the register to store.
1046 RN is the base address register. */
1047
1048 static int
1049 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1050 struct aarch64_register rt, struct aarch64_register rn)
1051 {
1052 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1053 xzr, rn);
1054 }
1055
1056 /* Write a STLR instruction into *BUF.
1057
1058 STLR rt, [xn]
1059
1060 RT is the register to store.
1061 RN is the base address register. */
1062
1063 static int
1064 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1065 struct aarch64_register rn)
1066 {
1067 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1068 xzr, rn);
1069 }
1070
1071 /* Helper function for data processing instructions with register sources. */
1072
1073 static int
1074 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1075 struct aarch64_register rd,
1076 struct aarch64_register rn,
1077 struct aarch64_register rm)
1078 {
1079 uint32_t size = ENCODE (rd.is64, 1, 31);
1080
1081 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1082 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1083 }
1084
1085 /* Helper function for data processing instructions taking either a register
1086 or an immediate. */
1087
1088 static int
1089 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1090 struct aarch64_register rd,
1091 struct aarch64_register rn,
1092 struct aarch64_operand operand)
1093 {
1094 uint32_t size = ENCODE (rd.is64, 1, 31);
1095 /* The opcode is different for register and immediate source operands. */
1096 uint32_t operand_opcode;
1097
1098 if (operand.type == OPERAND_IMMEDIATE)
1099 {
1100 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1101 operand_opcode = ENCODE (8, 4, 25);
1102
1103 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1104 | ENCODE (operand.imm, 12, 10)
1105 | ENCODE (rn.num, 5, 5)
1106 | ENCODE (rd.num, 5, 0));
1107 }
1108 else
1109 {
1110 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1111 operand_opcode = ENCODE (5, 4, 25);
1112
1113 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1114 rn, operand.reg);
1115 }
1116 }
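/* Editor's illustration (not in the original source): assuming the
   opcode constants follow the architectural encodings, emitting
   "ADD x0, x1, #4" takes the immediate path (pattern 1000 placed in
   bits 28..25, imm12 == 4) and yields 0x91001020, while the register
   form "ADD x0, x1, x2" takes the other path and yields 0x8b020020.
   Both values can be checked against any AArch64 disassembler. */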
1117
1118 /* Write an ADD instruction into *BUF.
1119
1120 ADD rd, rn, #imm
1121 ADD rd, rn, rm
1122
1123 This function handles both an immediate and register add.
1124
1125 RD is the destination register.
1126 RN is the input register.
1127 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1128 OPERAND_REGISTER. */
1129
1130 static int
1131 emit_add (uint32_t *buf, struct aarch64_register rd,
1132 struct aarch64_register rn, struct aarch64_operand operand)
1133 {
1134 return emit_data_processing (buf, ADD, rd, rn, operand);
1135 }
1136
1137 /* Write a SUB instruction into *BUF.
1138
1139 SUB rd, rn, #imm
1140 SUB rd, rn, rm
1141
1142 This function handles both an immediate and register sub.
1143
1144 RD is the destination register.
1145 RN is the input register.
1146 OPERAND is the immediate or register to subtract from RN.
1147
1148 static int
1149 emit_sub (uint32_t *buf, struct aarch64_register rd,
1150 struct aarch64_register rn, struct aarch64_operand operand)
1151 {
1152 return emit_data_processing (buf, SUB, rd, rn, operand);
1153 }
1154
1155 /* Write a MOV instruction into *BUF.
1156
1157 MOV rd, #imm
1158 MOV rd, rm
1159
1160 This function handles both a wide immediate move and a register move,
1161 with the condition that the source register is not xzr. xzr and the
1162 stack pointer share the same encoding and this function only supports
1163 the stack pointer.
1164
1165 RD is the destination register.
1166 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1167 OPERAND_REGISTER. */
1168
1169 static int
1170 emit_mov (uint32_t *buf, struct aarch64_register rd,
1171 struct aarch64_operand operand)
1172 {
1173 if (operand.type == OPERAND_IMMEDIATE)
1174 {
1175 uint32_t size = ENCODE (rd.is64, 1, 31);
1176 /* Do not shift the immediate. */
1177 uint32_t shift = ENCODE (0, 2, 21);
1178
1179 return aarch64_emit_insn (buf, MOV | size | shift
1180 | ENCODE (operand.imm, 16, 5)
1181 | ENCODE (rd.num, 5, 0));
1182 }
1183 else
1184 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1185 }
1186
1187 /* Write a MOVK instruction into *BUF.
1188
1189 MOVK rd, #imm, lsl #shift
1190
1191 RD is the destination register.
1192 IMM is the immediate.
1193 SHIFT is the logical shift left to apply to IMM. */
1194
1195 static int
1196 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1197 unsigned shift)
1198 {
1199 uint32_t size = ENCODE (rd.is64, 1, 31);
1200
1201 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1202 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1203 }
1204
1205 /* Write instructions into *BUF in order to move ADDR into a register.
1206 ADDR can be a 64-bit value.
1207
1208 This function will emit a series of MOV and MOVK instructions, such as:
1209
1210 MOV xd, #(addr)
1211 MOVK xd, #(addr >> 16), lsl #16
1212 MOVK xd, #(addr >> 32), lsl #32
1213 MOVK xd, #(addr >> 48), lsl #48 */
1214
1215 static int
1216 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1217 {
1218 uint32_t *p = buf;
1219
1220 /* The MOV (wide immediate) instruction clears the top bits of the
1221 register. */
1222 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1223
1224 if ((addr >> 16) != 0)
1225 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1226 else
1227 return p - buf;
1228
1229 if ((addr >> 32) != 0)
1230 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1231 else
1232 return p - buf;
1233
1234 if ((addr >> 48) != 0)
1235 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1236
1237 return p - buf;
1238 }
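/* Editor's illustration (not in the original source): for
   ADDR == 0x0000ffffb7001234 the function above emits

   MOV  xd, #0x1234
   MOVK xd, #0xb700, lsl #16
   MOVK xd, #0xffff, lsl #32

   and then returns, since the top 16 bits of ADDR are zero. */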
1239
1240 /* Write a SUBS instruction into *BUF.
1241
1242 SUBS rd, rn, rm
1243
1244 This instruction updates the condition flags.
1245
1246 RD is the destination register.
1247 RN and RM are the source registers. */
1248
1249 static int
1250 emit_subs (uint32_t *buf, struct aarch64_register rd,
1251 struct aarch64_register rn, struct aarch64_operand operand)
1252 {
1253 return emit_data_processing (buf, SUBS, rd, rn, operand);
1254 }
1255
1256 /* Write a CMP instruction into *BUF.
1257
1258 CMP rn, rm
1259
1260 This instruction is an alias of SUBS xzr, rn, rm.
1261
1262 RN and RM are the registers to compare. */
1263
1264 static int
1265 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1266 struct aarch64_operand operand)
1267 {
1268 return emit_subs (buf, xzr, rn, operand);
1269 }
1270
1271 /* Write a AND instruction into *BUF.
1272
1273 AND rd, rn, rm
1274
1275 RD is the destination register.
1276 RN and RM are the source registers. */
1277
1278 static int
1279 emit_and (uint32_t *buf, struct aarch64_register rd,
1280 struct aarch64_register rn, struct aarch64_register rm)
1281 {
1282 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1283 }
1284
1285 /* Write a ORR instruction into *BUF.
1286
1287 ORR rd, rn, rm
1288
1289 RD is the destination register.
1290 RN and RM are the source registers. */
1291
1292 static int
1293 emit_orr (uint32_t *buf, struct aarch64_register rd,
1294 struct aarch64_register rn, struct aarch64_register rm)
1295 {
1296 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1297 }
1298
1299 /* Write a ORN instruction into *BUF.
1300
1301 ORN rd, rn, rm
1302
1303 RD is the destination register.
1304 RN and RM are the source registers. */
1305
1306 static int
1307 emit_orn (uint32_t *buf, struct aarch64_register rd,
1308 struct aarch64_register rn, struct aarch64_register rm)
1309 {
1310 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1311 }
1312
1313 /* Write a EOR instruction into *BUF.
1314
1315 EOR rd, rn, rm
1316
1317 RD is the destination register.
1318 RN and RM are the source registers. */
1319
1320 static int
1321 emit_eor (uint32_t *buf, struct aarch64_register rd,
1322 struct aarch64_register rn, struct aarch64_register rm)
1323 {
1324 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1325 }
1326
1327 /* Write a MVN instruction into *BUF.
1328
1329 MVN rd, rm
1330
1331 This is an alias for ORN rd, xzr, rm.
1332
1333 RD is the destination register.
1334 RM is the source register. */
1335
1336 static int
1337 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1338 struct aarch64_register rm)
1339 {
1340 return emit_orn (buf, rd, xzr, rm);
1341 }
1342
1343 /* Write a LSLV instruction into *BUF.
1344
1345 LSLV rd, rn, rm
1346
1347 RD is the destination register.
1348 RN and RM are the source registers. */
1349
1350 static int
1351 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1352 struct aarch64_register rn, struct aarch64_register rm)
1353 {
1354 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1355 }
1356
1357 /* Write a LSRV instruction into *BUF.
1358
1359 LSRV rd, rn, rm
1360
1361 RD is the destination register.
1362 RN and RM are the source registers. */
1363
1364 static int
1365 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1366 struct aarch64_register rn, struct aarch64_register rm)
1367 {
1368 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1369 }
1370
1371 /* Write a ASRV instruction into *BUF.
1372
1373 ASRV rd, rn, rm
1374
1375 RD is the destination register.
1376 RN and RM are the source registers. */
1377
1378 static int
1379 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1380 struct aarch64_register rn, struct aarch64_register rm)
1381 {
1382 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1383 }
1384
1385 /* Write a MUL instruction into *BUF.
1386
1387 MUL rd, rn, rm
1388
1389 RD is the destination register.
1390 RN and RM are the source registers. */
1391
1392 static int
1393 emit_mul (uint32_t *buf, struct aarch64_register rd,
1394 struct aarch64_register rn, struct aarch64_register rm)
1395 {
1396 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1397 }
1398
1399 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1400
1401 MRS xt, system_reg
1402
1403 RT is the destination register.
1404 SYSTEM_REG is the special purpose register to read.
1405
1406 static int
1407 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1408 enum aarch64_system_control_registers system_reg)
1409 {
1410 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1411 | ENCODE (rt.num, 5, 0));
1412 }
1413
1414 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1415
1416 MSR system_reg, xt
1417
1418 SYSTEM_REG is the special purpose register to write.
1419 RT is the input register. */
1420
1421 static int
1422 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1423 struct aarch64_register rt)
1424 {
1425 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1426 | ENCODE (rt.num, 5, 0));
1427 }
1428
1429 /* Write a SEVL instruction into *BUF.
1430
1431 This hint instruction signals an event locally, so that a following WFE
does not wait. */
1432
1433 static int
1434 emit_sevl (uint32_t *buf)
1435 {
1436 return aarch64_emit_insn (buf, SEVL);
1437 }
1438
1439 /* Write a WFE instruction into *BUF.
1440
1441 This is a hint instruction telling the hardware to wait for an event. */
1442
1443 static int
1444 emit_wfe (uint32_t *buf)
1445 {
1446 return aarch64_emit_insn (buf, WFE);
1447 }
1448
1449 /* Write a SBFM instruction into *BUF.
1450
1451 SBFM rd, rn, #immr, #imms
1452
1453 This instruction moves the bits from #immr to #imms into the
1454 destination, sign extending the result.
1455
1456 RD is the destination register.
1457 RN is the source register.
1458 IMMR is the bit number to start at (least significant bit).
1459 IMMS is the bit number to stop at (most significant bit). */
1460
1461 static int
1462 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1463 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1464 {
1465 uint32_t size = ENCODE (rd.is64, 1, 31);
1466 uint32_t n = ENCODE (rd.is64, 1, 22);
1467
1468 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1469 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1470 | ENCODE (rd.num, 5, 0));
1471 }
1472
1473 /* Write a SBFX instruction into *BUF.
1474
1475 SBFX rd, rn, #lsb, #width
1476
1477 This instruction moves #width bits from #lsb into the destination, sign
1478 extending the result. This is an alias for:
1479
1480 SBFM rd, rn, #lsb, #(lsb + width - 1)
1481
1482 RD is the destination register.
1483 RN is the source register.
1484 LSB is the bit number to start at (least significant bit).
1485 WIDTH is the number of bits to move. */
1486
1487 static int
1488 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1489 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1490 {
1491 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1492 }
1493
1494 /* Write a UBFM instruction into *BUF.
1495
1496 UBFM rd, rn, #immr, #imms
1497
1498 This instruction moves the bits from #immr to #imms into the
1499 destination, extending the result with zeros.
1500
1501 RD is the destination register.
1502 RN is the source register.
1503 IMMR is the bit number to start at (least significant bit).
1504 IMMS is the bit number to stop at (most significant bit). */
1505
1506 static int
1507 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1508 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1509 {
1510 uint32_t size = ENCODE (rd.is64, 1, 31);
1511 uint32_t n = ENCODE (rd.is64, 1, 22);
1512
1513 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1514 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1515 | ENCODE (rd.num, 5, 0));
1516 }
1517
1518 /* Write a UBFX instruction into *BUF.
1519
1520 UBFX rd, rn, #lsb, #width
1521
1522 This instruction moves #width bits from #lsb into the destination,
1523 extending the result with zeros. This is an alias for:
1524
1525 UBFM rd, rn, #lsb, #(lsb + width - 1)
1526
1527 RD is the destination register.
1528 RN is the source register.
1529 LSB is the bit number to start at (least significant bit).
1530 WIDTH is the number of bits to move. */
1531
1532 static int
1533 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1534 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1535 {
1536 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1537 }
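/* Editor's illustration (not in the original source): a call such as
   emit_ubfx (buf, w0, w2, 8, 4) extracts bits 8..11 of w2 into w0,
   i.e. "UBFX w0, w2, #8, #4", which expands to the alias
   "UBFM w0, w2, #8, #11". */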
1538
1539 /* Write a CSINC instruction into *BUF.
1540
1541 CSINC rd, rn, rm, cond
1542
1543 If the condition is true, this instruction writes RN to RD;
1544 otherwise it writes RM incremented by one.
1545
1546 RD is the destination register.
1547 RN and RM are the source registers.
1548 COND is the encoded condition. */
1549
1550 static int
1551 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1552 struct aarch64_register rn, struct aarch64_register rm,
1553 unsigned cond)
1554 {
1555 uint32_t size = ENCODE (rd.is64, 1, 31);
1556
1557 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1558 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1559 | ENCODE (rd.num, 5, 0));
1560 }
1561
1562 /* Write a CSET instruction into *BUF.
1563
1564 CSET rd, cond
1565
1566 This instruction conditionally writes 1 or 0 to the destination register.
1567 1 is written if the condition is true. This is an alias for:
1568
1569 CSINC rd, xzr, xzr, !cond
1570
1571 Note that the condition needs to be inverted.
1572
1573 RD is the destination register.
1574 COND is the encoded condition. */
1576
1577 static int
1578 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1579 {
1580 /* The least significant bit of the condition needs toggling in order to
1581 invert it. */
1582 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1583 }
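/* Editor's illustration (not in the original source): emit_cset
   (buf, x0, EQ) toggles EQ (0x0) into NE (0x1) and emits
   "CSINC x0, xzr, xzr, NE", so x0 reads 1 when the EQ condition
   holds and 0 otherwise. */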
1584
1585 /* Write LEN instructions from BUF into the inferior memory at *TO.
1586
1587 Note instructions are always little endian on AArch64, unlike data. */
1588
1589 static void
1590 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1591 {
1592 size_t byte_len = len * sizeof (uint32_t);
1593 #if (__BYTE_ORDER == __BIG_ENDIAN)
1594 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1595 size_t i;
1596
1597 for (i = 0; i < len; i++)
1598 le_buf[i] = htole32 (buf[i]);
1599
1600 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1601
1602 xfree (le_buf);
1603 #else
1604 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1605 #endif
1606
1607 *to += byte_len;
1608 }
1609
1610 /* Sub-class of struct aarch64_insn_data that stores information for
1611 instruction relocation for fast tracepoints. The visitor can
1612 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1613 the relocated instructions in the buffer pointed to by INSN_PTR. */
1614
1615 struct aarch64_insn_relocation_data
1616 {
1617 struct aarch64_insn_data base;
1618
1619 /* The new address the instruction is relocated to. */
1620 CORE_ADDR new_addr;
1621 /* Pointer to the buffer of relocated instruction(s). */
1622 uint32_t *insn_ptr;
1623 };
1624
1625 /* Implementation of aarch64_insn_visitor method "b". */
1626
1627 static void
1628 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1629 struct aarch64_insn_data *data)
1630 {
1631 struct aarch64_insn_relocation_data *insn_reloc
1632 = (struct aarch64_insn_relocation_data *) data;
1633 int64_t new_offset
1634 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1635
1636 if (can_encode_int32 (new_offset, 28))
1637 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1638 }
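/* Editor's illustration (not in the original source): if a "B #0x20"
   at INSN_ADDR 0x400000 is relocated to NEW_ADDR 0x401000, the
   original target 0x400020 must now be reached from 0x401000, so
   NEW_OFFSET == 0x400000 - 0x401000 + 0x20 == -0xfe0, which fits in
   the 28-bit branch range. If the offset did not fit, no replacement
   instruction would be emitted here. */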
1639
1640 /* Implementation of aarch64_insn_visitor method "b_cond". */
1641
1642 static void
1643 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1644 struct aarch64_insn_data *data)
1645 {
1646 struct aarch64_insn_relocation_data *insn_reloc
1647 = (struct aarch64_insn_relocation_data *) data;
1648 int64_t new_offset
1649 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1650
1651 if (can_encode_int32 (new_offset, 21))
1652 {
1653 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1654 new_offset);
1655 }
1656 else if (can_encode_int32 (new_offset, 28))
1657 {
1658 /* The offset is out of range for a conditional branch
1659 instruction but not for an unconditional branch. We can use
1660 the following instructions instead:
1661
1662 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1663 B NOT_TAKEN ; Else jump over TAKEN and continue.
1664 TAKEN:
1665 B #(offset - 8)
1666 NOT_TAKEN:
1667
1668 */
1669
1670 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1671 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1672 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1673 }
1674 }
1675
1676 /* Implementation of aarch64_insn_visitor method "cb". */
1677
1678 static void
1679 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1680 const unsigned rn, int is64,
1681 struct aarch64_insn_data *data)
1682 {
1683 struct aarch64_insn_relocation_data *insn_reloc
1684 = (struct aarch64_insn_relocation_data *) data;
1685 int64_t new_offset
1686 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1687
1688 if (can_encode_int32 (new_offset, 21))
1689 {
1690 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1691 aarch64_register (rn, is64), new_offset);
1692 }
1693 else if (can_encode_int32 (new_offset, 28))
1694 {
1695 /* The offset is out of range for a compare and branch
1696 instruction but not for an unconditional branch. We can use
1697 the following instructions instead:
1698
1699 CBZ xn, TAKEN ; If xn == 0, then jump to TAKEN.
1700 B NOT_TAKEN ; Else jump over TAKEN and continue.
1701 TAKEN:
1702 B #(offset - 8)
1703 NOT_TAKEN:
1704
1705 */
1706 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1707 aarch64_register (rn, is64), 8);
1708 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1709 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1710 }
1711 }
1712
1713 /* Implementation of aarch64_insn_visitor method "tb". */
1714
1715 static void
1716 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1717 const unsigned rt, unsigned bit,
1718 struct aarch64_insn_data *data)
1719 {
1720 struct aarch64_insn_relocation_data *insn_reloc
1721 = (struct aarch64_insn_relocation_data *) data;
1722 int64_t new_offset
1723 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1724
1725 if (can_encode_int32 (new_offset, 16))
1726 {
1727 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1728 aarch64_register (rt, 1), new_offset);
1729 }
1730 else if (can_encode_int32 (new_offset, 28))
1731 {
1732 /* The offset is out of range for a test bit and branch
1733 instruction but not for an unconditional branch. We can use
1734 the following instructions instead:
1735
1736 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
1737 B NOT_TAKEN ; Else jump over TAKEN and continue.
1738 TAKEN:
1739 B #(offset - 8)
1740 NOT_TAKEN:
1741
1742 */
1743 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1744 aarch64_register (rt, 1), 8);
1745 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1746 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1747 new_offset - 8);
1748 }
1749 }
1750
1751 /* Implementation of aarch64_insn_visitor method "adr". */
1752
1753 static void
1754 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1755 const int is_adrp,
1756 struct aarch64_insn_data *data)
1757 {
1758 struct aarch64_insn_relocation_data *insn_reloc
1759 = (struct aarch64_insn_relocation_data *) data;
1760 /* We know exactly the address the ADR{P,} instruction will compute.
1761 We can just write it to the destination register. */
1762 CORE_ADDR address = data->insn_addr + offset;
1763
1764 if (is_adrp)
1765 {
1766 /* Clear the lower 12 bits of the offset to get the 4K page. */
1767 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1768 aarch64_register (rd, 1),
1769 address & ~0xfff);
1770 }
1771 else
1772 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1773 aarch64_register (rd, 1), address);
1774 }
1775
1776 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1777
1778 static void
1779 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1780 const unsigned rt, const int is64,
1781 struct aarch64_insn_data *data)
1782 {
1783 struct aarch64_insn_relocation_data *insn_reloc
1784 = (struct aarch64_insn_relocation_data *) data;
1785 CORE_ADDR address = data->insn_addr + offset;
1786
1787 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1788 aarch64_register (rt, 1), address);
1789
1790 /* We know exactly what address to load from, and what register we
1791 can use:
1792
1793 MOV xd, #(oldloc + offset)
1794 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1795 ...
1796
1797 LDR xd, [xd] ; or LDRSW xd, [xd]
1798
1799 */
1800
1801 if (is_sw)
1802 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1803 aarch64_register (rt, 1),
1804 aarch64_register (rt, 1),
1805 offset_memory_operand (0));
1806 else
1807 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1808 aarch64_register (rt, is64),
1809 aarch64_register (rt, 1),
1810 offset_memory_operand (0));
1811 }
1812
1813 /* Implementation of aarch64_insn_visitor method "others". */
1814
1815 static void
1816 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1817 struct aarch64_insn_data *data)
1818 {
1819 struct aarch64_insn_relocation_data *insn_reloc
1820 = (struct aarch64_insn_relocation_data *) data;
1821
1822 /* The instruction is not PC relative. Just re-emit it at the new
1823 location. */
1824 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1825 }
1826
1827 static const struct aarch64_insn_visitor visitor =
1828 {
1829 aarch64_ftrace_insn_reloc_b,
1830 aarch64_ftrace_insn_reloc_b_cond,
1831 aarch64_ftrace_insn_reloc_cb,
1832 aarch64_ftrace_insn_reloc_tb,
1833 aarch64_ftrace_insn_reloc_adr,
1834 aarch64_ftrace_insn_reloc_ldr_literal,
1835 aarch64_ftrace_insn_reloc_others,
1836 };
1837
1838 /* Implementation of linux_target_ops method
1839 "install_fast_tracepoint_jump_pad". */
1840
1841 static int
1842 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1843 CORE_ADDR tpaddr,
1844 CORE_ADDR collector,
1845 CORE_ADDR lockaddr,
1846 ULONGEST orig_size,
1847 CORE_ADDR *jump_entry,
1848 CORE_ADDR *trampoline,
1849 ULONGEST *trampoline_size,
1850 unsigned char *jjump_pad_insn,
1851 ULONGEST *jjump_pad_insn_size,
1852 CORE_ADDR *adjusted_insn_addr,
1853 CORE_ADDR *adjusted_insn_addr_end,
1854 char *err)
1855 {
1856 uint32_t buf[256];
1857 uint32_t *p = buf;
1858 int64_t offset;
1859 int i;
1860 uint32_t insn;
1861 CORE_ADDR buildaddr = *jump_entry;
1862 struct aarch64_insn_relocation_data insn_data;
1863
1864 /* We need to save the current state on the stack both to restore it
1865 later and to collect register values when the tracepoint is hit.
1866
1867 The saved registers are pushed in a layout that needs to be in sync
1868 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1869 the supply_fast_tracepoint_registers function will fill in the
1870 register cache from a pointer to saved registers on the stack we build
1871 here.
1872
1873 For simplicity, we set the size of each cell on the stack to 16 bytes.
1874 This way one cell can hold any register type, from system registers
1875 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1876 has to be 16-byte aligned anyway.
1877
1878 Note that the CPSR register does not exist on AArch64. Instead we
1879 can access system bits describing the process state with the
1880 MRS/MSR instructions, namely the condition flags. We save them as
1881 if they are part of a CPSR register because that's how GDB
1882 interprets these system bits. At the moment, only the condition
1883 flags are saved in CPSR (NZCV).
1884
1885 Stack layout, each cell is 16 bytes (descending):
1886
1887 High *-------- SIMD&FP registers from 31 down to 0. --------*
1888 | q31 |
1889 . .
1890 . . 32 cells
1891 . .
1892 | q0 |
1893 *---- General purpose registers from 30 down to 0. ----*
1894 | x30 |
1895 . .
1896 . . 31 cells
1897 . .
1898 | x0 |
1899 *------------- Special purpose registers. -------------*
1900 | SP |
1901 | PC |
1902 | CPSR (NZCV) | 5 cells
1903 | FPSR |
1904 | FPCR | <- SP + 16
1905 *------------- collecting_t object --------------------*
1906 | TPIDR_EL0 | struct tracepoint * |
1907 Low *------------------------------------------------------*
1908
1909 After this stack is set up, we issue a call to the collector, passing
1910 it the saved registers at (SP + 16). */
1911
1912 /* Push SIMD&FP registers on the stack:
1913
1914 SUB sp, sp, #(32 * 16)
1915
1916 STP q30, q31, [sp, #(30 * 16)]
1917 ...
1918 STP q0, q1, [sp]
1919
1920 */
1921 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1922 for (i = 30; i >= 0; i -= 2)
1923 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1924
1925 /* Push general purpose registers on the stack. Note that we do not need
1926 to push x31 as it represents the xzr register and not the stack
1927 pointer in a STR instruction.
1928
1929 SUB sp, sp, #(31 * 16)
1930
1931 STR x30, [sp, #(30 * 16)]
1932 ...
1933 STR x0, [sp]
1934
1935 */
1936 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1937 for (i = 30; i >= 0; i -= 1)
1938 p += emit_str (p, aarch64_register (i, 1), sp,
1939 offset_memory_operand (i * 16));
1940
1941 /* Make space for 5 more cells.
1942
1943 SUB sp, sp, #(5 * 16)
1944
1945 */
1946 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1947
1948
1949 /* Save SP:
1950
1951 ADD x4, sp, #((32 + 31 + 5) * 16)
1952 STR x4, [sp, #(4 * 16)]
1953
1954 */
1955 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1956 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
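/* Editor's note: (32 + 31 + 5) * 16 = 1088 bytes, the sum of everything
pushed so far, so x4 receives the value the stack pointer had before
the jump pad started saving state. */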
1957
1958 /* Save PC (tracepoint address):
1959
1960 MOV x3, #(tpaddr)
1961 ...
1962
1963 STR x3, [sp, #(3 * 16)]
1964
1965 */
1966
1967 p += emit_mov_addr (p, x3, tpaddr);
1968 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1969
1970 /* Save CPSR (NZCV), FPSR and FPCR:
1971
1972 MRS x2, nzcv
1973 MRS x1, fpsr
1974 MRS x0, fpcr
1975
1976 STR x2, [sp, #(2 * 16)]
1977 STR x1, [sp, #(1 * 16)]
1978 STR x0, [sp, #(0 * 16)]
1979
1980 */
1981 p += emit_mrs (p, x2, NZCV);
1982 p += emit_mrs (p, x1, FPSR);
1983 p += emit_mrs (p, x0, FPCR);
1984 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1985 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1986 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1987
1988 /* Push the collecting_t object. It consists of the address of the
1989 tracepoint and an ID for the current thread. We get the latter by
1990 reading the tpidr_el0 system register. It corresponds to the
1991 NT_ARM_TLS register accessible with ptrace.
1992
1993 MOV x0, #(tpoint)
1994 ...
1995
1996 MRS x1, tpidr_el0
1997
1998 STP x0, x1, [sp, #-16]!
1999
2000 */
2001
2002 p += emit_mov_addr (p, x0, tpoint);
2003 p += emit_mrs (p, x1, TPIDR_EL0);
2004 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2005
2006 /* Spin-lock:
2007
2008 The shared memory for the lock is at lockaddr. It will hold zero
2009 if no-one is holding the lock, otherwise it contains the address of
2010 the collecting_t object on the stack of the thread which acquired it.
2011
2012 At this stage, the stack pointer points to this thread's collecting_t
2013 object.
2014
2015 We use the following registers:
2016 - x0: Address of the lock.
2017 - x1: Pointer to collecting_t object.
2018 - x2: Scratch register.
2019
2020 MOV x0, #(lockaddr)
2021 ...
2022 MOV x1, sp
2023
2024 ; Trigger an event local to this core. So the following WFE
2025 ; instruction is ignored.
2026 SEVL
2027 again:
2028 ; Wait for an event. The event is triggered by either the SEVL
2029 ; or STLR instructions (store release).
2030 WFE
2031
2032 ; Atomically read at lockaddr. This marks the memory location as
2033 ; exclusive. This instruction also has memory constraints which
2034 ; make sure all previous data reads and writes are done before
2035 ; executing it.
2036 LDAXR x2, [x0]
2037
2038 ; Try again if another thread holds the lock.
2039 CBNZ x2, again
2040
2041 ; We can lock it! Write the address of the collecting_t object.
2042 ; This instruction will fail if the memory location is not marked
2043 ; as exclusive anymore. If it succeeds, it will remove the
2044 ; exclusive mark on the memory location. This way, if another
2045 ; thread executes this instruction before us, we will fail and try
2046 ; all over again.
2047 STXR w2, x1, [x0]
2048 CBNZ w2, again
2049
2050 */
2051
2052 p += emit_mov_addr (p, x0, lockaddr);
2053 p += emit_mov (p, x1, register_operand (sp));
2054
2055 p += emit_sevl (p);
2056 p += emit_wfe (p);
2057 p += emit_ldaxr (p, x2, x0);
2058 p += emit_cb (p, 1, x2, -2 * 4);
2059 p += emit_stxr (p, w2, x1, x0);
2060 p += emit_cb (p, 1, w2, -4 * 4);
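/* Editor's note: AArch64 instructions are 4 bytes each, so the first
CBNZ branches back 2 instructions to the WFE (the "again" label in the
listing above), and the second branches back 4 instructions, also
landing on the WFE. */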
2061
2062 /* Call collector (struct tracepoint *, unsigned char *):
2063
2064 MOV x0, #(tpoint)
2065 ...
2066
2067 ; Saved registers start after the collecting_t object.
2068 ADD x1, sp, #16
2069
2070 ; We use an intra-procedure-call scratch register.
2071 MOV ip0, #(collector)
2072 ...
2073
2074 ; And call back to C!
2075 BLR ip0
2076
2077 */
2078
2079 p += emit_mov_addr (p, x0, tpoint);
2080 p += emit_add (p, x1, sp, immediate_operand (16));
2081
2082 p += emit_mov_addr (p, ip0, collector);
2083 p += emit_blr (p, ip0);
2084
2085 /* Release the lock.
2086
2087 MOV x0, #(lockaddr)
2088 ...
2089
2090 ; This instruction is a normal store with memory ordering
2091 ; constraints. Thanks to this we do not have to put a data
2092 ; barrier instruction to make sure all data reads and writes are done
2093 ; before this instruction is executed. Furthermore, this instruction
2094 ; will trigger an event, letting other threads know they can grab
2095 ; the lock.
2096 STLR xzr, [x0]
2097
2098 */
2099 p += emit_mov_addr (p, x0, lockaddr);
2100 p += emit_stlr (p, xzr, x0);
2101
2102 /* Free collecting_t object:
2103
2104 ADD sp, sp, #16
2105
2106 */
2107 p += emit_add (p, sp, sp, immediate_operand (16));
2108
2109 /* Restore CPSR (NZCV), FPSR and FPCR, then free the five special
2110 purpose register cells from the stack.
2111
2112 LDR x2, [sp, #(2 * 16)]
2113 LDR x1, [sp, #(1 * 16)]
2114 LDR x0, [sp, #(0 * 16)]
2115
2116 MSR NZCV, x2
2117 MSR FPSR, x1
2118 MSR FPCR, x0
2119
2120 ADD sp, sp, #(5 * 16)
2121
2122 */
2123 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2124 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2125 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2126 p += emit_msr (p, NZCV, x2);
2127 p += emit_msr (p, FPSR, x1);
2128 p += emit_msr (p, FPCR, x0);
2129
2130 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2131
2132 /* Pop general purpose registers:
2133
2134 LDR x0, [sp]
2135 ...
2136 LDR x30, [sp, #(30 * 16)]
2137
2138 ADD sp, sp, #(31 * 16)
2139
2140 */
2141 for (i = 0; i <= 30; i += 1)
2142 p += emit_ldr (p, aarch64_register (i, 1), sp,
2143 offset_memory_operand (i * 16));
2144 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2145
2146 /* Pop SIMD&FP registers:
2147
2148 LDP q0, q1, [sp]
2149 ...
2150 LDP q30, q31, [sp, #(30 * 16)]
2151
2152 ADD sp, sp, #(32 * 16)
2153
2154 */
2155 for (i = 0; i <= 30; i += 2)
2156 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2157 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2158
2159 /* Write the code into the inferior memory. */
2160 append_insns (&buildaddr, p - buf, buf);
2161
2162 /* Now emit the relocated instruction. */
2163 *adjusted_insn_addr = buildaddr;
2164 target_read_uint32 (tpaddr, &insn);
2165
2166 insn_data.base.insn_addr = tpaddr;
2167 insn_data.new_addr = buildaddr;
2168 insn_data.insn_ptr = buf;
2169
2170 aarch64_relocate_instruction (insn, &visitor,
2171 (struct aarch64_insn_data *) &insn_data);
2172
2173 /* We may not have been able to relocate the instruction. */
2174 if (insn_data.insn_ptr == buf)
2175 {
2176 sprintf (err,
2177 "E.Could not relocate instruction from %s to %s.",
2178 core_addr_to_string_nz (tpaddr),
2179 core_addr_to_string_nz (buildaddr));
2180 return 1;
2181 }
2182 else
2183 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2184 *adjusted_insn_addr_end = buildaddr;
2185
2186 /* Go back to the start of the buffer. */
2187 p = buf;
2188
2189 /* Emit a branch back from the jump pad. */
2190 offset = (tpaddr + orig_size - buildaddr);
2191 if (!can_encode_int32 (offset, 28))
2192 {
2193 sprintf (err,
2194 "E.Jump back from jump pad too far from tracepoint "
2195 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2196 offset);
2197 return 1;
2198 }
2199
2200 p += emit_b (p, 0, offset);
2201 append_insns (&buildaddr, p - buf, buf);
2202
2203 /* Give the caller a branch instruction into the jump pad. */
2204 offset = (*jump_entry - tpaddr);
2205 if (!can_encode_int32 (offset, 28))
2206 {
2207 sprintf (err,
2208 "E.Jump pad too far from tracepoint "
2209 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2210 offset);
2211 return 1;
2212 }
2213
2214 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2215 *jjump_pad_insn_size = 4;
2216
2217 /* Return the end address of our pad. */
2218 *jump_entry = buildaddr;
2219
2220 return 0;
2221 }
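/* Editor's note: a hedged sketch of the range check used twice above.
B encodes a signed 26-bit word offset, i.e. a 28-bit byte offset,
giving a reach of +/-128 MiB; branch_offset_fits_sketch is a
hypothetical name, not part of the original file. */
#if 0
static int
branch_offset_fits_sketch (int64_t offset)
{
/* Equivalent to can_encode_int32 (offset, 28) for 4-byte aligned
branch targets. */
return offset >= -(1LL << 27) && offset < (1LL << 27)
&& (offset & 3) == 0;
}
#endif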
2222
2223 /* Helper function writing LEN instructions from START into
2224 current_insn_ptr. */
2225
2226 static void
2227 emit_ops_insns (const uint32_t *start, int len)
2228 {
2229 CORE_ADDR buildaddr = current_insn_ptr;
2230
2231 if (debug_threads)
2232 debug_printf ("Adding %d instructions at %s\n",
2233 len, paddress (buildaddr));
2234
2235 append_insns (&buildaddr, len, start);
2236 current_insn_ptr = buildaddr;
2237 }
2238
2239 /* Pop a register from the stack. */
2240
2241 static int
2242 emit_pop (uint32_t *buf, struct aarch64_register rt)
2243 {
2244 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2245 }
2246
2247 /* Push a register on the stack. */
2248
2249 static int
2250 emit_push (uint32_t *buf, struct aarch64_register rt)
2251 {
2252 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2253 }
2254
2255 /* Implementation of emit_ops method "emit_prologue". */
2256
2257 static void
2258 aarch64_emit_prologue (void)
2259 {
2260 uint32_t buf[16];
2261 uint32_t *p = buf;
2262
2263 /* This function emits a prologue for the following function prototype:
2264
2265 enum eval_result_type f (unsigned char *regs,
2266 ULONGEST *value);
2267
2268 The first argument is a buffer of raw registers. The second
2269 argument is a pointer through which the result of evaluating the
2270 expression is returned; it is set to whatever is on top of
2271 the stack at the end.
2272
2273 The stack set up by the prologue is as such:
2274
2275 High *------------------------------------------------------*
2276 | LR |
2277 | FP | <- FP
2278 | x1 (ULONGEST *value) |
2279 | x0 (unsigned char *regs) |
2280 Low *------------------------------------------------------*
2281
2282 As we are implementing a stack machine, each opcode can expand the
2283 stack so we never know how far we are from the data saved by this
2284 prologue. In order to be able to refer to value and regs later, we save
2285 the current stack pointer in the frame pointer. This way, it is not
2286 clobbered when calling C functions.
2287
2288 Finally, throughout every operation, we are using register x0 as the
2289 top of the stack, and x1 as a scratch register. */
2290
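/* Editor's note: under the layout above, the four emits below expand
roughly to:

STP x0, x1, [sp, #-32]!
STR x30, [sp, #24]
STR x29, [sp, #16]
ADD x29, sp, #16

where x29 is fp and x30 is lr. */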
2291 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2292 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2293 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2294
2295 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2296
2297
2298 emit_ops_insns (buf, p - buf);
2299 }
2300
2301 /* Implementation of emit_ops method "emit_epilogue". */
2302
2303 static void
2304 aarch64_emit_epilogue (void)
2305 {
2306 uint32_t buf[16];
2307 uint32_t *p = buf;
2308
2309 /* Store the result of the expression (x0) in *value. */
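/* Editor's note: FP points at the saved frame pointer cell, so FP - 8
is the slot where the prologue spilled x1, the ULONGEST *value
argument. */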
2310 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2311 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2312 p += emit_str (p, x0, x1, offset_memory_operand (0));
2313
2314 /* Restore the previous state. */
2315 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2316 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2317
2318 /* Return expr_eval_no_error. */
2319 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2320 p += emit_ret (p, lr);
2321
2322 emit_ops_insns (buf, p - buf);
2323 }
2324
2325 /* Implementation of emit_ops method "emit_add". */
2326
2327 static void
2328 aarch64_emit_add (void)
2329 {
2330 uint32_t buf[16];
2331 uint32_t *p = buf;
2332
2333 p += emit_pop (p, x1);
2334 p += emit_add (p, x0, x1, register_operand (x0));
2335
2336 emit_ops_insns (buf, p - buf);
2337 }
2338
2339 /* Implementation of emit_ops method "emit_sub". */
2340
2341 static void
2342 aarch64_emit_sub (void)
2343 {
2344 uint32_t buf[16];
2345 uint32_t *p = buf;
2346
2347 p += emit_pop (p, x1);
2348 p += emit_sub (p, x0, x1, register_operand (x0));
2349
2350 emit_ops_insns (buf, p - buf);
2351 }
2352
2353 /* Implementation of emit_ops method "emit_mul". */
2354
2355 static void
2356 aarch64_emit_mul (void)
2357 {
2358 uint32_t buf[16];
2359 uint32_t *p = buf;
2360
2361 p += emit_pop (p, x1);
2362 p += emit_mul (p, x0, x1, x0);
2363
2364 emit_ops_insns (buf, p - buf);
2365 }
2366
2367 /* Implementation of emit_ops method "emit_lsh". */
2368
2369 static void
2370 aarch64_emit_lsh (void)
2371 {
2372 uint32_t buf[16];
2373 uint32_t *p = buf;
2374
2375 p += emit_pop (p, x1);
2376 p += emit_lslv (p, x0, x1, x0);
2377
2378 emit_ops_insns (buf, p - buf);
2379 }
2380
2381 /* Implementation of emit_ops method "emit_rsh_signed". */
2382
2383 static void
2384 aarch64_emit_rsh_signed (void)
2385 {
2386 uint32_t buf[16];
2387 uint32_t *p = buf;
2388
2389 p += emit_pop (p, x1);
2390 p += emit_asrv (p, x0, x1, x0);
2391
2392 emit_ops_insns (buf, p - buf);
2393 }
2394
2395 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2396
2397 static void
2398 aarch64_emit_rsh_unsigned (void)
2399 {
2400 uint32_t buf[16];
2401 uint32_t *p = buf;
2402
2403 p += emit_pop (p, x1);
2404 p += emit_lsrv (p, x0, x1, x0);
2405
2406 emit_ops_insns (buf, p - buf);
2407 }
2408
2409 /* Implementation of emit_ops method "emit_ext". */
2410
2411 static void
2412 aarch64_emit_ext (int arg)
2413 {
2414 uint32_t buf[16];
2415 uint32_t *p = buf;
2416
2417 p += emit_sbfx (p, x0, x0, 0, arg);
2418
2419 emit_ops_insns (buf, p - buf);
2420 }
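/* Editor's note: SBFX x0, x0, #0, #ARG keeps the low ARG bits of the
top of the stack and sign-extends them to 64 bits; the zero-extending
counterpart using UBFX is aarch64_emit_zero_ext below. */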
2421
2422 /* Implementation of emit_ops method "emit_log_not". */
2423
2424 static void
2425 aarch64_emit_log_not (void)
2426 {
2427 uint32_t buf[16];
2428 uint32_t *p = buf;
2429
2430 /* If the top of the stack is 0, replace it with 1. Else replace it with
2431 0. */
2432
2433 p += emit_cmp (p, x0, immediate_operand (0));
2434 p += emit_cset (p, x0, EQ);
2435
2436 emit_ops_insns (buf, p - buf);
2437 }
2438
2439 /* Implementation of emit_ops method "emit_bit_and". */
2440
2441 static void
2442 aarch64_emit_bit_and (void)
2443 {
2444 uint32_t buf[16];
2445 uint32_t *p = buf;
2446
2447 p += emit_pop (p, x1);
2448 p += emit_and (p, x0, x0, x1);
2449
2450 emit_ops_insns (buf, p - buf);
2451 }
2452
2453 /* Implementation of emit_ops method "emit_bit_or". */
2454
2455 static void
2456 aarch64_emit_bit_or (void)
2457 {
2458 uint32_t buf[16];
2459 uint32_t *p = buf;
2460
2461 p += emit_pop (p, x1);
2462 p += emit_orr (p, x0, x0, x1);
2463
2464 emit_ops_insns (buf, p - buf);
2465 }
2466
2467 /* Implementation of emit_ops method "emit_bit_xor". */
2468
2469 static void
2470 aarch64_emit_bit_xor (void)
2471 {
2472 uint32_t buf[16];
2473 uint32_t *p = buf;
2474
2475 p += emit_pop (p, x1);
2476 p += emit_eor (p, x0, x0, x1);
2477
2478 emit_ops_insns (buf, p - buf);
2479 }
2480
2481 /* Implementation of emit_ops method "emit_bit_not". */
2482
2483 static void
2484 aarch64_emit_bit_not (void)
2485 {
2486 uint32_t buf[16];
2487 uint32_t *p = buf;
2488
2489 p += emit_mvn (p, x0, x0);
2490
2491 emit_ops_insns (buf, p - buf);
2492 }
2493
2494 /* Implementation of emit_ops method "emit_equal". */
2495
2496 static void
2497 aarch64_emit_equal (void)
2498 {
2499 uint32_t buf[16];
2500 uint32_t *p = buf;
2501
2502 p += emit_pop (p, x1);
2503 p += emit_cmp (p, x0, register_operand (x1));
2504 p += emit_cset (p, x0, EQ);
2505
2506 emit_ops_insns (buf, p - buf);
2507 }
2508
2509 /* Implementation of emit_ops method "emit_less_signed". */
2510
2511 static void
2512 aarch64_emit_less_signed (void)
2513 {
2514 uint32_t buf[16];
2515 uint32_t *p = buf;
2516
2517 p += emit_pop (p, x1);
2518 p += emit_cmp (p, x1, register_operand (x0));
2519 p += emit_cset (p, x0, LT);
2520
2521 emit_ops_insns (buf, p - buf);
2522 }
2523
2524 /* Implementation of emit_ops method "emit_less_unsigned". */
2525
2526 static void
2527 aarch64_emit_less_unsigned (void)
2528 {
2529 uint32_t buf[16];
2530 uint32_t *p = buf;
2531
2532 p += emit_pop (p, x1);
2533 p += emit_cmp (p, x1, register_operand (x0));
2534 p += emit_cset (p, x0, LO);
2535
2536 emit_ops_insns (buf, p - buf);
2537 }
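/* Editor's note: the only difference from aarch64_emit_less_signed
above is the condition tested on the same CMP: LO is the unsigned
"lower" relation, where LT is the signed less-than. */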
2538
2539 /* Implementation of emit_ops method "emit_ref". */
2540
2541 static void
2542 aarch64_emit_ref (int size)
2543 {
2544 uint32_t buf[16];
2545 uint32_t *p = buf;
2546
2547 switch (size)
2548 {
2549 case 1:
2550 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2551 break;
2552 case 2:
2553 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2554 break;
2555 case 4:
2556 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2557 break;
2558 case 8:
2559 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2560 break;
2561 default:
2562 /* Unknown size, bail on compilation. */
2563 emit_error = 1;
2564 break;
2565 }
2566
2567 emit_ops_insns (buf, p - buf);
2568 }
2569
2570 /* Implementation of emit_ops method "emit_if_goto". */
2571
2572 static void
2573 aarch64_emit_if_goto (int *offset_p, int *size_p)
2574 {
2575 uint32_t buf[16];
2576 uint32_t *p = buf;
2577
2578 /* The Z flag is set or cleared here. */
2579 p += emit_cmp (p, x0, immediate_operand (0));
2580 /* This instruction must not change the Z flag. */
2581 p += emit_pop (p, x0);
2582 /* Branch over the next instruction if x0 == 0. */
2583 p += emit_bcond (p, EQ, 8);
2584
2585 /* The NOP instruction will be patched with an unconditional branch. */
2586 if (offset_p)
2587 *offset_p = (p - buf) * 4;
2588 if (size_p)
2589 *size_p = 4;
2590 p += emit_nop (p);
2591
2592 emit_ops_insns (buf, p - buf);
2593 }
2594
2595 /* Implementation of emit_ops method "emit_goto". */
2596
2597 static void
2598 aarch64_emit_goto (int *offset_p, int *size_p)
2599 {
2600 uint32_t buf[16];
2601 uint32_t *p = buf;
2602
2603 /* The NOP instruction will be patched with an unconditional branch. */
2604 if (offset_p)
2605 *offset_p = 0;
2606 if (size_p)
2607 *size_p = 4;
2608 p += emit_nop (p);
2609
2610 emit_ops_insns (buf, p - buf);
2611 }
2612
2613 /* Implementation of emit_ops method "write_goto_address". */
2614
2615 static void
2616 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2617 {
2618 uint32_t insn;
2619
2620 emit_b (&insn, 0, to - from);
2621 append_insns (&from, 1, &insn);
2622 }
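/* Editor's note: this patches the NOP reserved by the emit_*_goto
methods with a single unconditional B; on AArch64 SIZE is always 4
(see the *size_p assignments above), so the function ignores it. */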
2623
2624 /* Implementation of emit_ops method "emit_const". */
2625
2626 static void
2627 aarch64_emit_const (LONGEST num)
2628 {
2629 uint32_t buf[16];
2630 uint32_t *p = buf;
2631
2632 p += emit_mov_addr (p, x0, num);
2633
2634 emit_ops_insns (buf, p - buf);
2635 }
2636
2637 /* Implementation of emit_ops method "emit_call". */
2638
2639 static void
2640 aarch64_emit_call (CORE_ADDR fn)
2641 {
2642 uint32_t buf[16];
2643 uint32_t *p = buf;
2644
2645 p += emit_mov_addr (p, ip0, fn);
2646 p += emit_blr (p, ip0);
2647
2648 emit_ops_insns (buf, p - buf);
2649 }
2650
2651 /* Implementation of emit_ops method "emit_reg". */
2652
2653 static void
2654 aarch64_emit_reg (int reg)
2655 {
2656 uint32_t buf[16];
2657 uint32_t *p = buf;
2658
2659 /* Set x0 to unsigned char *regs. */
2660 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2661 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2662 p += emit_mov (p, x1, immediate_operand (reg));
2663
2664 emit_ops_insns (buf, p - buf);
2665
2666 aarch64_emit_call (get_raw_reg_func_addr ());
2667 }
2668
2669 /* Implementation of emit_ops method "emit_pop". */
2670
2671 static void
2672 aarch64_emit_pop (void)
2673 {
2674 uint32_t buf[16];
2675 uint32_t *p = buf;
2676
2677 p += emit_pop (p, x0);
2678
2679 emit_ops_insns (buf, p - buf);
2680 }
2681
2682 /* Implementation of emit_ops method "emit_stack_flush". */
2683
2684 static void
2685 aarch64_emit_stack_flush (void)
2686 {
2687 uint32_t buf[16];
2688 uint32_t *p = buf;
2689
2690 p += emit_push (p, x0);
2691
2692 emit_ops_insns (buf, p - buf);
2693 }
2694
2695 /* Implementation of emit_ops method "emit_zero_ext". */
2696
2697 static void
2698 aarch64_emit_zero_ext (int arg)
2699 {
2700 uint32_t buf[16];
2701 uint32_t *p = buf;
2702
2703 p += emit_ubfx (p, x0, x0, 0, arg);
2704
2705 emit_ops_insns (buf, p - buf);
2706 }
2707
2708 /* Implementation of emit_ops method "emit_swap". */
2709
2710 static void
2711 aarch64_emit_swap (void)
2712 {
2713 uint32_t buf[16];
2714 uint32_t *p = buf;
2715
2716 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2717 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2718 p += emit_mov (p, x0, register_operand (x1));
2719
2720 emit_ops_insns (buf, p - buf);
2721 }
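/* Editor's note: because x0 caches the top of the stack (see the
prologue comment), swapping the two topmost entries only exchanges x0
with the in-memory slot at [sp]. */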
2722
2723 /* Implementation of emit_ops method "emit_stack_adjust". */
2724
2725 static void
2726 aarch64_emit_stack_adjust (int n)
2727 {
2728 /* Drop N 16-byte cells from the stack machine's stack. */
2729 uint32_t buf[16];
2730 uint32_t *p = buf;
2731
2732 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2733
2734 emit_ops_insns (buf, p - buf);
2735 }
2736
2737 /* Implementation of emit_ops method "emit_int_call_1". */
2738
2739 static void
2740 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2741 {
2742 uint32_t buf[16];
2743 uint32_t *p = buf;
2744
2745 p += emit_mov (p, x0, immediate_operand (arg1));
2746
2747 emit_ops_insns (buf, p - buf);
2748
2749 aarch64_emit_call (fn);
2750 }
2751
2752 /* Implementation of emit_ops method "emit_void_call_2". */
2753
2754 static void
2755 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2756 {
2757 uint32_t buf[16];
2758 uint32_t *p = buf;
2759
2760 /* Push x0 on the stack. */
2761 aarch64_emit_stack_flush ();
2762
2763 /* Set up arguments for the function call:
2764
2765 x0: arg1
2766 x1: top of the stack
2767
2768 MOV x1, x0
2769 MOV x0, #arg1 */
2770
2771 p += emit_mov (p, x1, register_operand (x0));
2772 p += emit_mov (p, x0, immediate_operand (arg1));
2773
2774 emit_ops_insns (buf, p - buf);
2775
2776 aarch64_emit_call (fn);
2777
2778 /* Restore x0. */
2779 aarch64_emit_pop ();
2780 }
2781
2782 /* Implementation of emit_ops method "emit_eq_goto". */
2783
2784 static void
2785 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2786 {
2787 uint32_t buf[16];
2788 uint32_t *p = buf;
2789
2790 p += emit_pop (p, x1);
2791 p += emit_cmp (p, x1, register_operand (x0));
2792 /* Branch over the next instruction if x0 != x1. */
2793 p += emit_bcond (p, NE, 8);
2794 /* The NOP instruction will be patched with an unconditional branch. */
2795 if (offset_p)
2796 *offset_p = (p - buf) * 4;
2797 if (size_p)
2798 *size_p = 4;
2799 p += emit_nop (p);
2800
2801 emit_ops_insns (buf, p - buf);
2802 }
2803
2804 /* Implementation of emit_ops method "emit_ne_goto". */
2805
2806 static void
2807 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2808 {
2809 uint32_t buf[16];
2810 uint32_t *p = buf;
2811
2812 p += emit_pop (p, x1);
2813 p += emit_cmp (p, x1, register_operand (x0));
2814 /* Branch over the next instruction if x0 == x1. */
2815 p += emit_bcond (p, EQ, 8);
2816 /* The NOP instruction will be patched with an unconditional branch. */
2817 if (offset_p)
2818 *offset_p = (p - buf) * 4;
2819 if (size_p)
2820 *size_p = 4;
2821 p += emit_nop (p);
2822
2823 emit_ops_insns (buf, p - buf);
2824 }
2825
2826 /* Implementation of emit_ops method "emit_lt_goto". */
2827
2828 static void
2829 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2830 {
2831 uint32_t buf[16];
2832 uint32_t *p = buf;
2833
2834 p += emit_pop (p, x1);
2835 p += emit_cmp (p, x1, register_operand (x0));
2836 /* Branch over the next instruction if x0 >= x1. */
2837 p += emit_bcond (p, GE, 8);
2838 /* The NOP instruction will be patched with an unconditional branch. */
2839 if (offset_p)
2840 *offset_p = (p - buf) * 4;
2841 if (size_p)
2842 *size_p = 4;
2843 p += emit_nop (p);
2844
2845 emit_ops_insns (buf, p - buf);
2846 }
2847
2848 /* Implementation of emit_ops method "emit_le_goto". */
2849
2850 static void
2851 aarch64_emit_le_goto (int *offset_p, int *size_p)
2852 {
2853 uint32_t buf[16];
2854 uint32_t *p = buf;
2855
2856 p += emit_pop (p, x1);
2857 p += emit_cmp (p, x1, register_operand (x0));
2858 /* Branch over the next instruction if x0 > x1. */
2859 p += emit_bcond (p, GT, 8);
2860 /* The NOP instruction will be patched with an unconditional branch. */
2861 if (offset_p)
2862 *offset_p = (p - buf) * 4;
2863 if (size_p)
2864 *size_p = 4;
2865 p += emit_nop (p);
2866
2867 emit_ops_insns (buf, p - buf);
2868 }
2869
2870 /* Implementation of emit_ops method "emit_gt_goto". */
2871
2872 static void
2873 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2874 {
2875 uint32_t buf[16];
2876 uint32_t *p = buf;
2877
2878 p += emit_pop (p, x1);
2879 p += emit_cmp (p, x1, register_operand (x0));
2880 /* Branch over the next instruction if x0 <= x1. */
2881 p += emit_bcond (p, LE, 8);
2882 /* The NOP instruction will be patched with an unconditional branch. */
2883 if (offset_p)
2884 *offset_p = (p - buf) * 4;
2885 if (size_p)
2886 *size_p = 4;
2887 p += emit_nop (p);
2888
2889 emit_ops_insns (buf, p - buf);
2890 }
2891
2892 /* Implementation of emit_ops method "emit_ge_goto". */
2893
2894 static void
2895 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2896 {
2897 uint32_t buf[16];
2898 uint32_t *p = buf;
2899
2900 p += emit_pop (p, x1);
2901 p += emit_cmp (p, x1, register_operand (x0));
2902 /* Branch over the next instruction if x0 < x1. */
2903 p += emit_bcond (p, LT, 8);
2904 /* The NOP instruction will be patched with an unconditional branch. */
2905 if (offset_p)
2906 *offset_p = (p - buf) * 4;
2907 if (size_p)
2908 *size_p = 4;
2909 p += emit_nop (p);
2910
2911 emit_ops_insns (buf, p - buf);
2912 }
2913
2914 static struct emit_ops aarch64_emit_ops_impl =
2915 {
2916 aarch64_emit_prologue,
2917 aarch64_emit_epilogue,
2918 aarch64_emit_add,
2919 aarch64_emit_sub,
2920 aarch64_emit_mul,
2921 aarch64_emit_lsh,
2922 aarch64_emit_rsh_signed,
2923 aarch64_emit_rsh_unsigned,
2924 aarch64_emit_ext,
2925 aarch64_emit_log_not,
2926 aarch64_emit_bit_and,
2927 aarch64_emit_bit_or,
2928 aarch64_emit_bit_xor,
2929 aarch64_emit_bit_not,
2930 aarch64_emit_equal,
2931 aarch64_emit_less_signed,
2932 aarch64_emit_less_unsigned,
2933 aarch64_emit_ref,
2934 aarch64_emit_if_goto,
2935 aarch64_emit_goto,
2936 aarch64_write_goto_address,
2937 aarch64_emit_const,
2938 aarch64_emit_call,
2939 aarch64_emit_reg,
2940 aarch64_emit_pop,
2941 aarch64_emit_stack_flush,
2942 aarch64_emit_zero_ext,
2943 aarch64_emit_swap,
2944 aarch64_emit_stack_adjust,
2945 aarch64_emit_int_call_1,
2946 aarch64_emit_void_call_2,
2947 aarch64_emit_eq_goto,
2948 aarch64_emit_ne_goto,
2949 aarch64_emit_lt_goto,
2950 aarch64_emit_le_goto,
2951 aarch64_emit_gt_goto,
2952 aarch64_emit_ge_goto,
2953 };
2954
2955 /* Implementation of linux_target_ops method "emit_ops". */
2956
2957 static struct emit_ops *
2958 aarch64_emit_ops (void)
2959 {
2960 return &aarch64_emit_ops_impl;
2961 }
2962
2963 /* Implementation of linux_target_ops method
2964 "get_min_fast_tracepoint_insn_len". */
2965
2966 static int
2967 aarch64_get_min_fast_tracepoint_insn_len (void)
2968 {
2969 return 4;
2970 }
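/* Editor's note: a fast tracepoint only needs room for the single
4-byte B instruction written into *jjump_pad_insn above, hence a
minimum instruction length of 4. */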
2971
2972 /* Implementation of linux_target_ops method "supports_range_stepping". */
2973
2974 static int
2975 aarch64_supports_range_stepping (void)
2976 {
2977 return 1;
2978 }
2979
2980 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2981
2982 static const gdb_byte *
2983 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2984 {
2985 if (is_64bit_tdesc ())
2986 {
2987 *size = aarch64_breakpoint_len;
2988 return aarch64_breakpoint;
2989 }
2990 else
2991 return arm_sw_breakpoint_from_kind (kind, size);
2992 }
2993
2994 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2995
2996 static int
2997 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2998 {
2999 if (is_64bit_tdesc ())
3000 return aarch64_breakpoint_len;
3001 else
3002 return arm_breakpoint_kind_from_pc (pcptr);
3003 }
3004
3005 /* Implementation of the linux_target_ops method
3006 "breakpoint_kind_from_current_state". */
3007
3008 static int
3009 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3010 {
3011 if (is_64bit_tdesc ())
3012 return aarch64_breakpoint_len;
3013 else
3014 return arm_breakpoint_kind_from_current_state (pcptr);
3015 }
3016
3017 /* Support for hardware single step. */
3018
3019 static int
3020 aarch64_supports_hardware_single_step (void)
3021 {
3022 return 1;
3023 }
3024
3025 struct linux_target_ops the_low_target =
3026 {
3027 aarch64_arch_setup,
3028 aarch64_regs_info,
3029 NULL, /* cannot_fetch_register */
3030 NULL, /* cannot_store_register */
3031 NULL, /* fetch_register */
3032 aarch64_get_pc,
3033 aarch64_set_pc,
3034 aarch64_breakpoint_kind_from_pc,
3035 aarch64_sw_breakpoint_from_kind,
3036 NULL, /* get_next_pcs */
3037 0, /* decr_pc_after_break */
3038 aarch64_breakpoint_at,
3039 aarch64_supports_z_point_type,
3040 aarch64_insert_point,
3041 aarch64_remove_point,
3042 aarch64_stopped_by_watchpoint,
3043 aarch64_stopped_data_address,
3044 NULL, /* collect_ptrace_register */
3045 NULL, /* supply_ptrace_register */
3046 aarch64_linux_siginfo_fixup,
3047 aarch64_linux_new_process,
3048 aarch64_linux_delete_process,
3049 aarch64_linux_new_thread,
3050 aarch64_linux_delete_thread,
3051 aarch64_linux_new_fork,
3052 aarch64_linux_prepare_to_resume,
3053 NULL, /* process_qsupported */
3054 aarch64_supports_tracepoints,
3055 aarch64_get_thread_area,
3056 aarch64_install_fast_tracepoint_jump_pad,
3057 aarch64_emit_ops,
3058 aarch64_get_min_fast_tracepoint_insn_len,
3059 aarch64_supports_range_stepping,
3060 aarch64_breakpoint_kind_from_current_state,
3061 aarch64_supports_hardware_single_step,
3062 aarch64_get_syscall_trapinfo,
3063 };
3064
3065 void
3066 initialize_low_arch (void)
3067 {
3068 initialize_low_arch_aarch32 ();
3069
3070 initialize_regsets_info (&aarch64_regsets_info);
3071 initialize_regsets_info (&aarch64_sve_regsets_info);
3072
3073 #if GDB_SELF_TEST
3074 initialize_low_tdesc ();
3075 #endif
3076 }