gdbserver: add aarch64_create_target_description
gdb/gdbserver/linux-aarch64-low.c
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch64-tdesc.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch
          && addr_trap < addr_watch + len)
        return addr_trap;
    }

  return (CORE_ADDR) 0;
}
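
/* Illustrative example (added for clarity, not in the original source):
   with a 4-byte watchpoint armed at address 0x1000, a trap whose
   siginfo.si_addr is 0x1002 satisfies
   addr_watch <= addr_trap < addr_watch + len, so the loop above
   reports 0x1002 as the stopped data address.  */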

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors are zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    current_process ()->tdesc = aarch64_linux_read_description ();
  else
    current_process ()->tdesc = tdesc_arm_with_neon;

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
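
/* Illustrative note (added for clarity, not in the original source):
   the field layout above matches the 15-bit system register operand
   consumed by emit_mrs/emit_msr below.  Taking TPIDR_EL0 as an
   example, (0xd << 7) places CRn = 13 and the trailing 0x2 is op2, so
   emit_mrs (p, x1, TPIDR_EL0) assembles to MRS x1, tpidr_el0.  */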

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
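
/* Illustrative note (added for clarity, not in the original source):
   the pair offset is encoded as a scaled, signed 7-bit immediate,
   which is why the code above shifts operand.index right by 3.  For
   example, emit_stp (p, x0, x1, sp, preindex_memory_operand (-16))
   encodes imm7 = -16 >> 3 = -2 and produces STP x0, x1, [sp, #-16]!.  */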

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

822
823 LDP rt, rt2, [rn, #offset]
824 LDP rt, rt2, [rn, #index]!
825 LDP rt, rt2, [rn], #index
826
827 RT and RT2 are the registers to store.
828 RN is the base address register.
829 OFFSET is the immediate to add to the base address. It is limited to a
830 -512 .. 504 range (7 bits << 3). */
831
832 static int
833 emit_ldp (uint32_t *buf, struct aarch64_register rt,
834 struct aarch64_register rt2, struct aarch64_register rn,
835 struct aarch64_memory_operand operand)
836 {
837 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
838 }
839
840 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
841
842 LDP qt, qt2, [rn, #offset]
843
844 RT and RT2 are the Q registers to store.
845 RN is the base address register.
846 OFFSET is the immediate to add to the base address. It is limited to
847 -1024 .. 1008 range (7 bits << 4). */
848
849 static int
850 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
851 struct aarch64_register rn, int32_t offset)
852 {
853 uint32_t opc = ENCODE (2, 2, 30);
854 uint32_t pre_index = ENCODE (1, 1, 24);
855
856 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
857 | ENCODE (offset >> 4, 7, 15)
858 | ENCODE (rt2, 5, 10)
859 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
860 }
861

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 4095 range (12 bits).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}


/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}
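
/* Illustrative note (added for clarity, not in the original source):
   the two paths above select the encoding class via bits 25..28.  For
   example, emit_add (p, x2, x2, immediate_operand (4)) takes the
   immediate path and encodes ADD x2, x2, #4, while
   emit_add (p, x2, x2, register_operand (x3)) goes through
   emit_data_processing_reg and encodes ADD x2, x2, x3.  */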

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
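
/* Illustrative example (added for clarity, not in the original source):
   for ADDR = 0x7f12345678 the sequence emitted is

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16
     MOVK xd, #0x7f, lsl #32

   and the function returns 3, since bits 48..63 of ADDR are zero and
   the final MOVK is skipped.  */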

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
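
/* Illustrative example (added for clarity, not in the original source):
   emit_ubfx (p, x0, x1, 16, 8) expands to UBFM x0, x1, #16, #23 and
   copies bits 16..23 of x1 into the low byte of x0, zeroing the rest.  */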

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes rn to rd if the condition is true, and rm
   incremented by one otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
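
/* Illustrative example (added for clarity, not in the original source):
   with the condition codes defined earlier, emit_cset (p, x0, EQ)
   passes EQ ^ 0x1 == NE to emit_csinc, producing
   CSINC x0, xzr, xzr, NE, which leaves 1 in x0 when the Z flag is set
   and 0 otherwise.  */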

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
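
/* Illustrative usage sketch (added for clarity, not in the original
   source), following the pattern used by the jump pad builder below:

     uint32_t buf[16];
     uint32_t *p = buf;
     CORE_ADDR buildaddr = where;

     p += emit_mov_addr (p, x0, some_addr);
     p += emit_ret (p, lr);
     append_insns (&buildaddr, p - buf, buf);

   Here "where" and "some_addr" are hypothetical values; after the
   call, buildaddr points past the instructions just written.  */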

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         B.COND TAKEN    ; If cond is true, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
         B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

         TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
         B NOT_TAKEN           ; Else jump over TAKEN and continue.
         TAKEN:
         B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      .  32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      .  31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          |  5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

1921 /* Push the collecting_t object. It consist of the address of the
1922 tracepoint and an ID for the current thread. We get the latter by
1923 reading the tpidr_el0 system register. It corresponds to the
1924 NT_ARM_TLS register accessible with ptrace.
1925
1926 MOV x0, #(tpoint)
1927 ...
1928
1929 MRS x1, tpidr_el0
1930
1931 STP x0, x1, [sp, #-16]!
1932
1933 */
1934
1935 p += emit_mov_addr (p, x0, tpoint);
1936 p += emit_mrs (p, x1, TPIDR_EL0);
1937 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1938
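/* For reference, the two words stored here mirror the collecting_t
   type used by gdbserver's tracepoint code, roughly:

     struct collecting_t
     {
       uintptr_t tpoint;
       uintptr_t thread_area;
     };

   This is shown only to make the STP above easier to read; see
   gdbserver/tracepoint.c for the real definition.  */
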
1939 /* Spin-lock:
1940
1941 The shared memory for the lock is at lockaddr. It will hold zero
1942 if no-one is holding the lock, otherwise it contains the address of
1943 the collecting_t object on the stack of the thread which acquired it.
1944
1945 At this stage, the stack pointer points to this thread's collecting_t
1946 object.
1947
1948 We use the following registers:
1949 - x0: Address of the lock.
1950 - x1: Pointer to collecting_t object.
1951 - x2: Scratch register.
1952
1953 MOV x0, #(lockaddr)
1954 ...
1955 MOV x1, sp
1956
1957 ; Trigger an event local to this core, setting the event register
1958 ; so that the first WFE below falls through instead of waiting.
1959 SEVL
1960 again:
1961 ; Wait for an event. The event is triggered by either the SEVL
1962 ; or STLR instructions (store release).
1963 WFE
1964
1965 ; Atomically read at lockaddr. This marks the memory location as
1966 ; exclusive. This instruction also has memory constraints which
1967 ; make sure all previous data reads and writes are done before
1968 ; executing it.
1969 LDAXR x2, [x0]
1970
1971 ; Try again if another thread holds the lock.
1972 CBNZ x2, again
1973
1974 ; We can lock it! Write the address of the collecting_t object.
1975 ; This instruction will fail if the memory location is not marked
1976 ; as exclusive anymore. If it succeeds, it will remove the
1977 ; exclusive mark on the memory location. This way, if another
1978 ; thread executes this instruction before us, we will fail and try
1979 ; all over again.
1980 STXR w2, x1, [x0]
1981 CBNZ w2, again
1982
1983 */
1984
1985 p += emit_mov_addr (p, x0, lockaddr);
1986 p += emit_mov (p, x1, register_operand (sp));
1987
1988 p += emit_sevl (p);
1989 p += emit_wfe (p);
1990 p += emit_ldaxr (p, x2, x0);
1991 p += emit_cb (p, 1, x2, -2 * 4);
1992 p += emit_stxr (p, w2, x1, x0);
1993 p += emit_cb (p, 1, w2, -4 * 4);
1994
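/* As an illustration only, the acquire loop above behaves roughly like
   the following C, with LOCK pointing at lockaddr and OBJ holding the
   address of our collecting_t object:

     static void
     acquire_lock (uintptr_t *lock, uintptr_t obj)
     {
       uintptr_t expected;

       do
         expected = 0;
       while (!__atomic_compare_exchange_n (lock, &expected, obj, 0,
                                            __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED));
     }

   The jump pad is emitted as raw instructions, so the lock is spelled
   out with LDAXR/STXR instead of calling such a helper.  */
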
1995 /* Call collector (struct tracepoint *, unsigned char *):
1996
1997 MOV x0, #(tpoint)
1998 ...
1999
2000 ; Saved registers start after the collecting_t object.
2001 ADD x1, sp, #16
2002
2003 ; We use an intra-procedure-call scratch register.
2004 MOV ip0, #(collector)
2005 ...
2006
2007 ; And call back to C!
2008 BLR ip0
2009
2010 */
2011
2012 p += emit_mov_addr (p, x0, tpoint);
2013 p += emit_add (p, x1, sp, immediate_operand (16));
2014
2015 p += emit_mov_addr (p, ip0, collector);
2016 p += emit_blr (p, ip0);
2017
2018 /* Release the lock.
2019
2020 MOV x0, #(lockaddr)
2021 ...
2022
2023 ; This instruction is a normal store with memory ordering
2024 ; constraints. Thanks to this we do not have to put a data
2025 ; barrier instruction to make sure all data reads and writes are done
2026 ; before this instruction is executed. Furthermore, this instruction
2027 ; will trigger an event, letting other threads know they can grab
2028 ; the lock.
2029 STLR xzr, [x0]
2030
2031 */
2032 p += emit_mov_addr (p, x0, lockaddr);
2033 p += emit_stlr (p, xzr, x0);
2034
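/* The C analogue of this release, again purely as an illustration:

     __atomic_store_n (lock, 0, __ATOMIC_RELEASE);

   A plain store would not do: it would neither order the collected
   data before the unlock nor generate the event that wakes up threads
   waiting in WFE above.  */
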
2035 /* Free collecting_t object:
2036
2037 ADD sp, sp, #16
2038
2039 */
2040 p += emit_add (p, sp, sp, immediate_operand (16));
2041
2042 /* Restore CPSR (NZCV), FPSR and FPCR, and pop the special purpose
2043 register cells off the stack.
2044
2045 LDR x2, [sp, #(2 * 16)]
2046 LDR x1, [sp, #(1 * 16)]
2047 LDR x0, [sp, #(0 * 16)]
2048
2049 MSR NZCV, x2
2050 MSR FPSR, x1
2051 MSR FPCR, x0
2052
2053 ADD sp, sp, #(5 * 16)
2054
2055 */
2056 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2057 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2058 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2059 p += emit_msr (p, NZCV, x2);
2060 p += emit_msr (p, FPSR, x1);
2061 p += emit_msr (p, FPCR, x0);
2062
2063 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2064
2065 /* Pop general purpose registers:
2066
2067 LDR x0, [sp]
2068 ...
2069 LDR x30, [sp, #(30 * 16)]
2070
2071 ADD sp, sp, #(31 * 16)
2072
2073 */
2074 for (i = 0; i <= 30; i += 1)
2075 p += emit_ldr (p, aarch64_register (i, 1), sp,
2076 offset_memory_operand (i * 16));
2077 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2078
2079 /* Pop SIMD&FP registers:
2080
2081 LDP q0, q1, [sp]
2082 ...
2083 LDP q30, q31, [sp, #(30 * 16)]
2084
2085 ADD sp, sp, #(32 * 16)
2086
2087 */
2088 for (i = 0; i <= 30; i += 2)
2089 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2090 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2091
2092 /* Write the code into the inferior memory. */
2093 append_insns (&buildaddr, p - buf, buf);
2094
2095 /* Now emit the relocated instruction. */
2096 *adjusted_insn_addr = buildaddr;
2097 target_read_uint32 (tpaddr, &insn);
2098
2099 insn_data.base.insn_addr = tpaddr;
2100 insn_data.new_addr = buildaddr;
2101 insn_data.insn_ptr = buf;
2102
2103 aarch64_relocate_instruction (insn, &visitor,
2104 (struct aarch64_insn_data *) &insn_data);
2105
2106 /* We may not have been able to relocate the instruction. */
2107 if (insn_data.insn_ptr == buf)
2108 {
2109 sprintf (err,
2110 "E.Could not relocate instruction from %s to %s.",
2111 core_addr_to_string_nz (tpaddr),
2112 core_addr_to_string_nz (buildaddr));
2113 return 1;
2114 }
2115 else
2116 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2117 *adjusted_insn_addr_end = buildaddr;
2118
2119 /* Go back to the start of the buffer. */
2120 p = buf;
2121
2122 /* Emit a branch back from the jump pad. */
2123 offset = (tpaddr + orig_size - buildaddr);
2124 if (!can_encode_int32 (offset, 28))
2125 {
2126 sprintf (err,
2127 "E.Jump back from jump pad too far from tracepoint "
2128 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2129 offset);
2130 return 1;
2131 }
2132
2133 p += emit_b (p, 0, offset);
2134 append_insns (&buildaddr, p - buf, buf);
2135
2136 /* Give the caller a branch instruction into the jump pad. */
2137 offset = (*jump_entry - tpaddr);
2138 if (!can_encode_int32 (offset, 28))
2139 {
2140 sprintf (err,
2141 "E.Jump pad too far from tracepoint "
2142 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2143 offset);
2144 return 1;
2145 }
2146
2147 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2148 *jjump_pad_insn_size = 4;
2149
2150 /* Return the end address of our pad. */
2151 *jump_entry = buildaddr;
2152
2153 return 0;
2154 }
2155
2156 /* Helper function writing LEN instructions from START into
2157 current_insn_ptr. */
2158
2159 static void
2160 emit_ops_insns (const uint32_t *start, int len)
2161 {
2162 CORE_ADDR buildaddr = current_insn_ptr;
2163
2164 if (debug_threads)
2165 debug_printf ("Adding %d instructions at %s\n",
2166 len, paddress (buildaddr));
2167
2168 append_insns (&buildaddr, len, start);
2169 current_insn_ptr = buildaddr;
2170 }
2171
2172 /* Pop a register from the stack. */
2173
2174 static int
2175 emit_pop (uint32_t *buf, struct aarch64_register rt)
2176 {
2177 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2178 }
2179
2180 /* Push a register on the stack. */
2181
2182 static int
2183 emit_push (uint32_t *buf, struct aarch64_register rt)
2184 {
2185 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2186 }
2187
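/* Note that emit_pop and emit_push move SP by a full 16-byte cell per
   value even though only 8 bytes are transferred: AArch64 requires SP
   to be 16-byte aligned when it is used as the base of a memory
   access, so, e.g., emit_push (p, x0) expands to

     STR x0, [sp, #-16]!

   rather than an 8-byte adjustment.  */
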
2188 /* Implementation of emit_ops method "emit_prologue". */
2189
2190 static void
2191 aarch64_emit_prologue (void)
2192 {
2193 uint32_t buf[16];
2194 uint32_t *p = buf;
2195
2196 /* This function emits a prologue for the following function prototype:
2197
2198 enum eval_result_type f (unsigned char *regs,
2199 ULONGEST *value);
2200
2201 The first argument is a buffer of raw registers. The second
2202 argument points to the result of evaluating the expression,
2203 which will be set to whatever is on top of the stack at the
2204 end.
2205
2206 The stack set up by the prologue is as such:
2207
2208 High *------------------------------------------------------*
2209 | LR |
2210 | FP | <- FP
2211 | x1 (ULONGEST *value) |
2212 | x0 (unsigned char *regs) |
2213 Low *------------------------------------------------------*
2214
2215 As we are implementing a stack machine, each opcode can expand the
2216 stack so we never know how far we are from the data saved by this
2217 prologue. In order to be able to refer to value and regs later, we save
2218 the current stack pointer in the frame pointer. This way, it is not
2219 clobbered when calling C functions.
2220
2221 Finally, throughout every operation, we are using register x0 as the
2222 top of the stack, and x1 as a scratch register. */
2223
2224 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2225 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2226 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2227
2228 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2229
2230
2231 emit_ops_insns (buf, p - buf);
2232 }
2233
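/* With this frame in place, the saved arguments sit at fixed offsets
   from FP no matter how much the expression stack grows:

     regs  is at [fp, #-16]  (reloaded by aarch64_emit_reg)
     value is at [fp, #-8]   (reloaded by aarch64_emit_epilogue)

   Both emitters compute the address with a SUB from FP followed by an
   LDR.  */
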
2234 /* Implementation of emit_ops method "emit_epilogue". */
2235
2236 static void
2237 aarch64_emit_epilogue (void)
2238 {
2239 uint32_t buf[16];
2240 uint32_t *p = buf;
2241
2242 /* Store the result of the expression (x0) in *value. */
2243 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2244 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2245 p += emit_str (p, x0, x1, offset_memory_operand (0));
2246
2247 /* Restore the previous state. */
2248 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2249 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2250
2251 /* Return expr_eval_no_error. */
2252 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2253 p += emit_ret (p, lr);
2254
2255 emit_ops_insns (buf, p - buf);
2256 }
2257
2258 /* Implementation of emit_ops method "emit_add". */
2259
2260 static void
2261 aarch64_emit_add (void)
2262 {
2263 uint32_t buf[16];
2264 uint32_t *p = buf;
2265
2266 p += emit_pop (p, x1);
2267 p += emit_add (p, x0, x1, register_operand (x0));
2268
2269 emit_ops_insns (buf, p - buf);
2270 }
2271
2272 /* Implementation of emit_ops method "emit_sub". */
2273
2274 static void
2275 aarch64_emit_sub (void)
2276 {
2277 uint32_t buf[16];
2278 uint32_t *p = buf;
2279
2280 p += emit_pop (p, x1);
2281 p += emit_sub (p, x0, x1, register_operand (x0));
2282
2283 emit_ops_insns (buf, p - buf);
2284 }
2285
2286 /* Implementation of emit_ops method "emit_mul". */
2287
2288 static void
2289 aarch64_emit_mul (void)
2290 {
2291 uint32_t buf[16];
2292 uint32_t *p = buf;
2293
2294 p += emit_pop (p, x1);
2295 p += emit_mul (p, x0, x1, x0);
2296
2297 emit_ops_insns (buf, p - buf);
2298 }
2299
2300 /* Implementation of emit_ops method "emit_lsh". */
2301
2302 static void
2303 aarch64_emit_lsh (void)
2304 {
2305 uint32_t buf[16];
2306 uint32_t *p = buf;
2307
2308 p += emit_pop (p, x1);
2309 p += emit_lslv (p, x0, x1, x0);
2310
2311 emit_ops_insns (buf, p - buf);
2312 }
2313
2314 /* Implementation of emit_ops method "emit_rsh_signed". */
2315
2316 static void
2317 aarch64_emit_rsh_signed (void)
2318 {
2319 uint32_t buf[16];
2320 uint32_t *p = buf;
2321
2322 p += emit_pop (p, x1);
2323 p += emit_asrv (p, x0, x1, x0);
2324
2325 emit_ops_insns (buf, p - buf);
2326 }
2327
2328 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2329
2330 static void
2331 aarch64_emit_rsh_unsigned (void)
2332 {
2333 uint32_t buf[16];
2334 uint32_t *p = buf;
2335
2336 p += emit_pop (p, x1);
2337 p += emit_lsrv (p, x0, x1, x0);
2338
2339 emit_ops_insns (buf, p - buf);
2340 }
2341
2342 /* Implementation of emit_ops method "emit_ext". */
2343
2344 static void
2345 aarch64_emit_ext (int arg)
2346 {
2347 uint32_t buf[16];
2348 uint32_t *p = buf;
2349
2350 p += emit_sbfx (p, x0, x0, 0, arg);
2351
2352 emit_ops_insns (buf, p - buf);
2353 }
2354
2355 /* Implementation of emit_ops method "emit_log_not". */
2356
2357 static void
2358 aarch64_emit_log_not (void)
2359 {
2360 uint32_t buf[16];
2361 uint32_t *p = buf;
2362
2363 /* If the top of the stack is 0, replace it with 1; otherwise replace
2364 it with 0. */
2365
2366 p += emit_cmp (p, x0, immediate_operand (0));
2367 p += emit_cset (p, x0, EQ);
2368
2369 emit_ops_insns (buf, p - buf);
2370 }
2371
2372 /* Implementation of emit_ops method "emit_bit_and". */
2373
2374 static void
2375 aarch64_emit_bit_and (void)
2376 {
2377 uint32_t buf[16];
2378 uint32_t *p = buf;
2379
2380 p += emit_pop (p, x1);
2381 p += emit_and (p, x0, x0, x1);
2382
2383 emit_ops_insns (buf, p - buf);
2384 }
2385
2386 /* Implementation of emit_ops method "emit_bit_or". */
2387
2388 static void
2389 aarch64_emit_bit_or (void)
2390 {
2391 uint32_t buf[16];
2392 uint32_t *p = buf;
2393
2394 p += emit_pop (p, x1);
2395 p += emit_orr (p, x0, x0, x1);
2396
2397 emit_ops_insns (buf, p - buf);
2398 }
2399
2400 /* Implementation of emit_ops method "emit_bit_xor". */
2401
2402 static void
2403 aarch64_emit_bit_xor (void)
2404 {
2405 uint32_t buf[16];
2406 uint32_t *p = buf;
2407
2408 p += emit_pop (p, x1);
2409 p += emit_eor (p, x0, x0, x1);
2410
2411 emit_ops_insns (buf, p - buf);
2412 }
2413
2414 /* Implementation of emit_ops method "emit_bit_not". */
2415
2416 static void
2417 aarch64_emit_bit_not (void)
2418 {
2419 uint32_t buf[16];
2420 uint32_t *p = buf;
2421
2422 p += emit_mvn (p, x0, x0);
2423
2424 emit_ops_insns (buf, p - buf);
2425 }
2426
2427 /* Implementation of emit_ops method "emit_equal". */
2428
2429 static void
2430 aarch64_emit_equal (void)
2431 {
2432 uint32_t buf[16];
2433 uint32_t *p = buf;
2434
2435 p += emit_pop (p, x1);
2436 p += emit_cmp (p, x0, register_operand (x1));
2437 p += emit_cset (p, x0, EQ);
2438
2439 emit_ops_insns (buf, p - buf);
2440 }
2441
2442 /* Implementation of emit_ops method "emit_less_signed". */
2443
2444 static void
2445 aarch64_emit_less_signed (void)
2446 {
2447 uint32_t buf[16];
2448 uint32_t *p = buf;
2449
2450 p += emit_pop (p, x1);
2451 p += emit_cmp (p, x1, register_operand (x0));
2452 p += emit_cset (p, x0, LT);
2453
2454 emit_ops_insns (buf, p - buf);
2455 }
2456
2457 /* Implementation of emit_ops method "emit_less_unsigned". */
2458
2459 static void
2460 aarch64_emit_less_unsigned (void)
2461 {
2462 uint32_t buf[16];
2463 uint32_t *p = buf;
2464
2465 p += emit_pop (p, x1);
2466 p += emit_cmp (p, x1, register_operand (x0));
2467 p += emit_cset (p, x0, LO);
2468
2469 emit_ops_insns (buf, p - buf);
2470 }
2471
2472 /* Implementation of emit_ops method "emit_ref". */
2473
2474 static void
2475 aarch64_emit_ref (int size)
2476 {
2477 uint32_t buf[16];
2478 uint32_t *p = buf;
2479
2480 switch (size)
2481 {
2482 case 1:
2483 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2484 break;
2485 case 2:
2486 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2487 break;
2488 case 4:
2489 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2490 break;
2491 case 8:
2492 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2493 break;
2494 default:
2495 /* Unknown size, bail on compilation. */
2496 emit_error = 1;
2497 break;
2498 }
2499
2500 emit_ops_insns (buf, p - buf);
2501 }
2502
2503 /* Implementation of emit_ops method "emit_if_goto". */
2504
2505 static void
2506 aarch64_emit_if_goto (int *offset_p, int *size_p)
2507 {
2508 uint32_t buf[16];
2509 uint32_t *p = buf;
2510
2511 /* The Z flag is set or cleared here. */
2512 p += emit_cmp (p, x0, immediate_operand (0));
2513 /* This instruction must not change the Z flag. */
2514 p += emit_pop (p, x0);
2515 /* Branch over the next instruction if x0 == 0. */
2516 p += emit_bcond (p, EQ, 8);
2517
2518 /* The NOP instruction will be patched with an unconditional branch. */
2519 if (offset_p)
2520 *offset_p = (p - buf) * 4;
2521 if (size_p)
2522 *size_p = 4;
2523 p += emit_nop (p);
2524
2525 emit_ops_insns (buf, p - buf);
2526 }
2527
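/* All the *_goto emitters below follow the protocol seen here: they
   report through OFFSET_P and SIZE_P where the placeholder NOP lives
   in the emitted sequence, so that the bytecode compiler can later use
   write_goto_address to overwrite it with a real B instruction once
   the branch target is known.  */
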
2528 /* Implementation of emit_ops method "emit_goto". */
2529
2530 static void
2531 aarch64_emit_goto (int *offset_p, int *size_p)
2532 {
2533 uint32_t buf[16];
2534 uint32_t *p = buf;
2535
2536 /* The NOP instruction will be patched with an unconditional branch. */
2537 if (offset_p)
2538 *offset_p = 0;
2539 if (size_p)
2540 *size_p = 4;
2541 p += emit_nop (p);
2542
2543 emit_ops_insns (buf, p - buf);
2544 }
2545
2546 /* Implementation of emit_ops method "write_goto_address". */
2547
2548 static void
2549 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2550 {
2551 uint32_t insn;
2552
2553 emit_b (&insn, 0, to - from);
2554 append_insns (&from, 1, &insn);
2555 }
2556
2557 /* Implementation of emit_ops method "emit_const". */
2558
2559 static void
2560 aarch64_emit_const (LONGEST num)
2561 {
2562 uint32_t buf[16];
2563 uint32_t *p = buf;
2564
2565 p += emit_mov_addr (p, x0, num);
2566
2567 emit_ops_insns (buf, p - buf);
2568 }
2569
2570 /* Implementation of emit_ops method "emit_call". */
2571
2572 static void
2573 aarch64_emit_call (CORE_ADDR fn)
2574 {
2575 uint32_t buf[16];
2576 uint32_t *p = buf;
2577
2578 p += emit_mov_addr (p, ip0, fn);
2579 p += emit_blr (p, ip0);
2580
2581 emit_ops_insns (buf, p - buf);
2582 }
2583
2584 /* Implementation of emit_ops method "emit_reg". */
2585
2586 static void
2587 aarch64_emit_reg (int reg)
2588 {
2589 uint32_t buf[16];
2590 uint32_t *p = buf;
2591
2592 /* Set x0 to unsigned char *regs. */
2593 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2594 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2595 p += emit_mov (p, x1, immediate_operand (reg));
2596
2597 emit_ops_insns (buf, p - buf);
2598
2599 aarch64_emit_call (get_raw_reg_func_addr ());
2600 }
2601
2602 /* Implementation of emit_ops method "emit_pop". */
2603
2604 static void
2605 aarch64_emit_pop (void)
2606 {
2607 uint32_t buf[16];
2608 uint32_t *p = buf;
2609
2610 p += emit_pop (p, x0);
2611
2612 emit_ops_insns (buf, p - buf);
2613 }
2614
2615 /* Implementation of emit_ops method "emit_stack_flush". */
2616
2617 static void
2618 aarch64_emit_stack_flush (void)
2619 {
2620 uint32_t buf[16];
2621 uint32_t *p = buf;
2622
2623 p += emit_push (p, x0);
2624
2625 emit_ops_insns (buf, p - buf);
2626 }
2627
2628 /* Implementation of emit_ops method "emit_zero_ext". */
2629
2630 static void
2631 aarch64_emit_zero_ext (int arg)
2632 {
2633 uint32_t buf[16];
2634 uint32_t *p = buf;
2635
2636 p += emit_ubfx (p, x0, x0, 0, arg);
2637
2638 emit_ops_insns (buf, p - buf);
2639 }
2640
2641 /* Implementation of emit_ops method "emit_swap". */
2642
2643 static void
2644 aarch64_emit_swap (void)
2645 {
2646 uint32_t buf[16];
2647 uint32_t *p = buf;
2648
2649 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2650 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2651 p += emit_mov (p, x0, register_operand (x1));
2652
2653 emit_ops_insns (buf, p - buf);
2654 }
2655
2656 /* Implementation of emit_ops method "emit_stack_adjust". */
2657
2658 static void
2659 aarch64_emit_stack_adjust (int n)
2660 {
2661 /* Discarding N stack entries amounts to raising SP by N 16-byte cells. */
2662 uint32_t buf[16];
2663 uint32_t *p = buf;
2664
2665 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2666
2667 emit_ops_insns (buf, p - buf);
2668 }
2669
2670 /* Implementation of emit_ops method "emit_int_call_1". */
2671
2672 static void
2673 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2674 {
2675 uint32_t buf[16];
2676 uint32_t *p = buf;
2677
2678 p += emit_mov (p, x0, immediate_operand (arg1));
2679
2680 emit_ops_insns (buf, p - buf);
2681
2682 aarch64_emit_call (fn);
2683 }
2684
2685 /* Implementation of emit_ops method "emit_void_call_2". */
2686
2687 static void
2688 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2689 {
2690 uint32_t buf[16];
2691 uint32_t *p = buf;
2692
2693 /* Push x0 on the stack. */
2694 aarch64_emit_stack_flush ();
2695
2696 /* Set up the arguments for the function call:
2697
2698 x0: arg1
2699 x1: top of the stack
2700
2701 MOV x1, x0
2702 MOV x0, #arg1 */
2703
2704 p += emit_mov (p, x1, register_operand (x0));
2705 p += emit_mov (p, x0, immediate_operand (arg1));
2706
2707 emit_ops_insns (buf, p - buf);
2708
2709 aarch64_emit_call (fn);
2710
2711 /* Restore x0. */
2712 aarch64_emit_pop ();
2713 }
2714
2715 /* Implementation of emit_ops method "emit_eq_goto". */
2716
2717 static void
2718 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2719 {
2720 uint32_t buf[16];
2721 uint32_t *p = buf;
2722
2723 p += emit_pop (p, x1);
2724 p += emit_cmp (p, x1, register_operand (x0));
2725 /* Branch over the next instruction if x0 != x1. */
2726 p += emit_bcond (p, NE, 8);
2727 /* The NOP instruction will be patched with an unconditional branch. */
2728 if (offset_p)
2729 *offset_p = (p - buf) * 4;
2730 if (size_p)
2731 *size_p = 4;
2732 p += emit_nop (p);
2733
2734 emit_ops_insns (buf, p - buf);
2735 }
2736
2737 /* Implementation of emit_ops method "emit_ne_goto". */
2738
2739 static void
2740 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2741 {
2742 uint32_t buf[16];
2743 uint32_t *p = buf;
2744
2745 p += emit_pop (p, x1);
2746 p += emit_cmp (p, x1, register_operand (x0));
2747 /* Branch over the next instruction if x0 == x1. */
2748 p += emit_bcond (p, EQ, 8);
2749 /* The NOP instruction will be patched with an unconditional branch. */
2750 if (offset_p)
2751 *offset_p = (p - buf) * 4;
2752 if (size_p)
2753 *size_p = 4;
2754 p += emit_nop (p);
2755
2756 emit_ops_insns (buf, p - buf);
2757 }
2758
2759 /* Implementation of emit_ops method "emit_lt_goto". */
2760
2761 static void
2762 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2763 {
2764 uint32_t buf[16];
2765 uint32_t *p = buf;
2766
2767 p += emit_pop (p, x1);
2768 p += emit_cmp (p, x1, register_operand (x0));
2769 /* Branch over the next instruction if x1 >= x0. */
2770 p += emit_bcond (p, GE, 8);
2771 /* The NOP instruction will be patched with an unconditional branch. */
2772 if (offset_p)
2773 *offset_p = (p - buf) * 4;
2774 if (size_p)
2775 *size_p = 4;
2776 p += emit_nop (p);
2777
2778 emit_ops_insns (buf, p - buf);
2779 }
2780
2781 /* Implementation of emit_ops method "emit_le_goto". */
2782
2783 static void
2784 aarch64_emit_le_goto (int *offset_p, int *size_p)
2785 {
2786 uint32_t buf[16];
2787 uint32_t *p = buf;
2788
2789 p += emit_pop (p, x1);
2790 p += emit_cmp (p, x1, register_operand (x0));
2791 /* Branch over the next instruction if x1 > x0. */
2792 p += emit_bcond (p, GT, 8);
2793 /* The NOP instruction will be patched with an unconditional branch. */
2794 if (offset_p)
2795 *offset_p = (p - buf) * 4;
2796 if (size_p)
2797 *size_p = 4;
2798 p += emit_nop (p);
2799
2800 emit_ops_insns (buf, p - buf);
2801 }
2802
2803 /* Implementation of emit_ops method "emit_gt_goto". */
2804
2805 static void
2806 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2807 {
2808 uint32_t buf[16];
2809 uint32_t *p = buf;
2810
2811 p += emit_pop (p, x1);
2812 p += emit_cmp (p, x1, register_operand (x0));
2813 /* Branch over the next instruction if x1 <= x0. */
2814 p += emit_bcond (p, LE, 8);
2815 /* The NOP instruction will be patched with an unconditional branch. */
2816 if (offset_p)
2817 *offset_p = (p - buf) * 4;
2818 if (size_p)
2819 *size_p = 4;
2820 p += emit_nop (p);
2821
2822 emit_ops_insns (buf, p - buf);
2823 }
2824
2825 /* Implementation of emit_ops method "emit_ge_goto". */
2826
2827 static void
2828 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2829 {
2830 uint32_t buf[16];
2831 uint32_t *p = buf;
2832
2833 p += emit_pop (p, x1);
2834 p += emit_cmp (p, x1, register_operand (x0));
2835 /* Branch over the next instruction if x1 < x0. */
2836 p += emit_bcond (p, LT, 8);
2837 /* The NOP instruction will be patched with an unconditional branch. */
2838 if (offset_p)
2839 *offset_p = (p - buf) * 4;
2840 if (size_p)
2841 *size_p = 4;
2842 p += emit_nop (p);
2843
2844 emit_ops_insns (buf, p - buf);
2845 }
2846
2847 static struct emit_ops aarch64_emit_ops_impl =
2848 {
2849 aarch64_emit_prologue,
2850 aarch64_emit_epilogue,
2851 aarch64_emit_add,
2852 aarch64_emit_sub,
2853 aarch64_emit_mul,
2854 aarch64_emit_lsh,
2855 aarch64_emit_rsh_signed,
2856 aarch64_emit_rsh_unsigned,
2857 aarch64_emit_ext,
2858 aarch64_emit_log_not,
2859 aarch64_emit_bit_and,
2860 aarch64_emit_bit_or,
2861 aarch64_emit_bit_xor,
2862 aarch64_emit_bit_not,
2863 aarch64_emit_equal,
2864 aarch64_emit_less_signed,
2865 aarch64_emit_less_unsigned,
2866 aarch64_emit_ref,
2867 aarch64_emit_if_goto,
2868 aarch64_emit_goto,
2869 aarch64_write_goto_address,
2870 aarch64_emit_const,
2871 aarch64_emit_call,
2872 aarch64_emit_reg,
2873 aarch64_emit_pop,
2874 aarch64_emit_stack_flush,
2875 aarch64_emit_zero_ext,
2876 aarch64_emit_swap,
2877 aarch64_emit_stack_adjust,
2878 aarch64_emit_int_call_1,
2879 aarch64_emit_void_call_2,
2880 aarch64_emit_eq_goto,
2881 aarch64_emit_ne_goto,
2882 aarch64_emit_lt_goto,
2883 aarch64_emit_le_goto,
2884 aarch64_emit_gt_goto,
2885 aarch64_emit_ge_goto,
2886 };
2887
2888 /* Implementation of linux_target_ops method "emit_ops". */
2889
2890 static struct emit_ops *
2891 aarch64_emit_ops (void)
2892 {
2893 return &aarch64_emit_ops_impl;
2894 }
2895
2896 /* Implementation of linux_target_ops method
2897 "get_min_fast_tracepoint_insn_len". */
2898
2899 static int
2900 aarch64_get_min_fast_tracepoint_insn_len (void)
2901 {
2902 return 4;
2903 }
2904
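/* (Every AArch64 instruction is 4 bytes long, so installing a fast
   tracepoint only ever needs to replace a single instruction with the
   branch into the jump pad.)  */
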
2905 /* Implementation of linux_target_ops method "supports_range_stepping". */
2906
2907 static int
2908 aarch64_supports_range_stepping (void)
2909 {
2910 return 1;
2911 }
2912
2913 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2914
2915 static const gdb_byte *
2916 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2917 {
2918 if (is_64bit_tdesc ())
2919 {
2920 *size = aarch64_breakpoint_len;
2921 return aarch64_breakpoint;
2922 }
2923 else
2924 return arm_sw_breakpoint_from_kind (kind, size);
2925 }
2926
2927 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2928
2929 static int
2930 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2931 {
2932 if (is_64bit_tdesc ())
2933 return aarch64_breakpoint_len;
2934 else
2935 return arm_breakpoint_kind_from_pc (pcptr);
2936 }
2937
2938 /* Implementation of the linux_target_ops method
2939 "breakpoint_kind_from_current_state". */
2940
2941 static int
2942 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2943 {
2944 if (is_64bit_tdesc ())
2945 return aarch64_breakpoint_len;
2946 else
2947 return arm_breakpoint_kind_from_current_state (pcptr);
2948 }
2949
2950 /* Support for hardware single step. */
2951
2952 static int
2953 aarch64_supports_hardware_single_step (void)
2954 {
2955 return 1;
2956 }
2957
2958 struct linux_target_ops the_low_target =
2959 {
2960 aarch64_arch_setup,
2961 aarch64_regs_info,
2962 aarch64_cannot_fetch_register,
2963 aarch64_cannot_store_register,
2964 NULL, /* fetch_register */
2965 aarch64_get_pc,
2966 aarch64_set_pc,
2967 aarch64_breakpoint_kind_from_pc,
2968 aarch64_sw_breakpoint_from_kind,
2969 NULL, /* get_next_pcs */
2970 0, /* decr_pc_after_break */
2971 aarch64_breakpoint_at,
2972 aarch64_supports_z_point_type,
2973 aarch64_insert_point,
2974 aarch64_remove_point,
2975 aarch64_stopped_by_watchpoint,
2976 aarch64_stopped_data_address,
2977 NULL, /* collect_ptrace_register */
2978 NULL, /* supply_ptrace_register */
2979 aarch64_linux_siginfo_fixup,
2980 aarch64_linux_new_process,
2981 aarch64_linux_delete_process,
2982 aarch64_linux_new_thread,
2983 aarch64_linux_delete_thread,
2984 aarch64_linux_new_fork,
2985 aarch64_linux_prepare_to_resume,
2986 NULL, /* process_qsupported */
2987 aarch64_supports_tracepoints,
2988 aarch64_get_thread_area,
2989 aarch64_install_fast_tracepoint_jump_pad,
2990 aarch64_emit_ops,
2991 aarch64_get_min_fast_tracepoint_insn_len,
2992 aarch64_supports_range_stepping,
2993 aarch64_breakpoint_kind_from_current_state,
2994 aarch64_supports_hardware_single_step,
2995 aarch64_get_syscall_trapinfo,
2996 };
2997
2998 void
2999 initialize_low_arch (void)
3000 {
3001 init_registers_aarch64 ();
3002
3003 initialize_low_arch_aarch32 ();
3004
3005 initialize_regsets_info (&aarch64_regsets_info);
3006 }