/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch
	  && addr_trap < addr_watch + len)
	return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
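
/* For example (a sketch; emit_mrs and emit_msr are defined further below),
   the condition flags can be read into a scratch register and written back
   with:

     p += emit_mrs (p, x2, NZCV);
     p += emit_msr (p, NZCV, x2);  */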

/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write an STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write an LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write an LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write an STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write an LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write an LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}



/* Write an STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write an LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write an STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write an STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
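
/* For example, materializing the (hypothetical) address 0x0000007fb7ff0000
   with emit_mov_addr stops after the highest non-zero 16-bit chunk and
   emits three instructions:

     MOV  xd, #0x0000
     MOVK xd, #0xb7ff, lsl #16
     MOVK xd, #0x007f, lsl #32  */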

/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write an MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write an LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write an LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write an SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write an SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
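
/* For example (a sketch), extracting the low byte of w2 into w0:

     p += emit_ubfx (p, w0, w2, 0, 8);

   which assembles to UBFX w0, w2, #0, #8, an alias of
   UBFM w0, w2, #0, #7.  */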

/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination
   register: 1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
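
/* For example (a sketch), setting x0 to 1 when the previous comparison
   was equal, and to 0 otherwise:

     p += emit_cmp (p, x1, register_operand (x2));
     p += emit_cset (p, x0, EQ);

   The CSET helper toggles the low bit of EQ to encode the inverted
   condition (NE) in the underlying CSINC instruction.  */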
1526
1527 /* Write LEN instructions from BUF into the inferior memory at *TO.
1528
1529 Note instructions are always little endian on AArch64, unlike data. */
1530
1531 static void
1532 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1533 {
1534 size_t byte_len = len * sizeof (uint32_t);
1535 #if (__BYTE_ORDER == __BIG_ENDIAN)
1536 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1537 size_t i;
1538
1539 for (i = 0; i < len; i++)
1540 le_buf[i] = htole32 (buf[i]);
1541
1542 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1543
1544 xfree (le_buf);
1545 #else
1546 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1547 #endif
1548
1549 *to += byte_len;
1550 }
1551
1552 /* Sub-class of struct aarch64_insn_data, store information of
1553 instruction relocation for fast tracepoint. Visitor can
1554 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1555 the relocated instructions in buffer pointed by INSN_PTR. */
1556
1557 struct aarch64_insn_relocation_data
1558 {
1559 struct aarch64_insn_data base;
1560
1561 /* The new address the instruction is relocated to. */
1562 CORE_ADDR new_addr;
1563 /* Pointer to the buffer of relocated instruction(s). */
1564 uint32_t *insn_ptr;
1565 };
1566
1567 /* Implementation of aarch64_insn_visitor method "b". */
1568
1569 static void
1570 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1571 struct aarch64_insn_data *data)
1572 {
1573 struct aarch64_insn_relocation_data *insn_reloc
1574 = (struct aarch64_insn_relocation_data *) data;
1575 int64_t new_offset
1576 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1577
1578 if (can_encode_int32 (new_offset, 28))
1579 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1580 }
1581
1582 /* Implementation of aarch64_insn_visitor method "b_cond". */
1583
1584 static void
1585 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1586 struct aarch64_insn_data *data)
1587 {
1588 struct aarch64_insn_relocation_data *insn_reloc
1589 = (struct aarch64_insn_relocation_data *) data;
1590 int64_t new_offset
1591 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1592
1593 if (can_encode_int32 (new_offset, 21))
1594 {
1595 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1596 new_offset);
1597 }
1598 else if (can_encode_int32 (new_offset, 28))
1599 {
1600 /* The offset is out of range for a conditional branch
1601 instruction but not for a unconditional branch. We can use
1602 the following instructions instead:
1603
1604 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1605 B NOT_TAKEN ; Else jump over TAKEN and continue.
1606 TAKEN:
1607 B #(offset - 8)
1608 NOT_TAKEN:
1609
1610 */
1611
1612 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1613 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1614 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1615 }
1616 }
1617
1618 /* Implementation of aarch64_insn_visitor method "cb". */
1619
1620 static void
1621 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1622 const unsigned rn, int is64,
1623 struct aarch64_insn_data *data)
1624 {
1625 struct aarch64_insn_relocation_data *insn_reloc
1626 = (struct aarch64_insn_relocation_data *) data;
1627 int64_t new_offset
1628 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1629
1630 if (can_encode_int32 (new_offset, 21))
1631 {
1632 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1633 aarch64_register (rn, is64), new_offset);
1634 }
1635 else if (can_encode_int32 (new_offset, 28))
1636 {
1637 /* The offset is out of range for a compare and branch
1638 instruction but not for a unconditional branch. We can use
1639 the following instructions instead:
1640
1641 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1642 B NOT_TAKEN ; Else jump over TAKEN and continue.
1643 TAKEN:
1644 B #(offset - 8)
1645 NOT_TAKEN:
1646
1647 */
1648 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1649 aarch64_register (rn, is64), 8);
1650 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1651 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1652 }
1653 }
1654
1655 /* Implementation of aarch64_insn_visitor method "tb". */
1656
1657 static void
1658 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1659 const unsigned rt, unsigned bit,
1660 struct aarch64_insn_data *data)
1661 {
1662 struct aarch64_insn_relocation_data *insn_reloc
1663 = (struct aarch64_insn_relocation_data *) data;
1664 int64_t new_offset
1665 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1666
1667 if (can_encode_int32 (new_offset, 16))
1668 {
1669 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1670 aarch64_register (rt, 1), new_offset);
1671 }
1672 else if (can_encode_int32 (new_offset, 28))
1673 {
1674 /* The offset is out of range for a test bit and branch
1675 instruction but not for a unconditional branch. We can use
1676 the following instructions instead:
1677
1678 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1679 B NOT_TAKEN ; Else jump over TAKEN and continue.
1680 TAKEN:
1681 B #(offset - 8)
1682 NOT_TAKEN:
1683
1684 */
1685 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1686 aarch64_register (rt, 1), 8);
1687 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1688 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1689 new_offset - 8);
1690 }
1691 }
1692
1693 /* Implementation of aarch64_insn_visitor method "adr". */
1694
1695 static void
1696 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1697 const int is_adrp,
1698 struct aarch64_insn_data *data)
1699 {
1700 struct aarch64_insn_relocation_data *insn_reloc
1701 = (struct aarch64_insn_relocation_data *) data;
1702 /* We know exactly the address the ADR{P,} instruction will compute.
1703 We can just write it to the destination register. */
1704 CORE_ADDR address = data->insn_addr + offset;
1705
1706 if (is_adrp)
1707 {
1708 /* Clear the lower 12 bits of the offset to get the 4K page. */
1709 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1710 aarch64_register (rd, 1),
1711 address & ~0xfff);
1712 }
1713 else
1714 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1715 aarch64_register (rd, 1), address);
1716 }
1717
1718 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1719
1720 static void
1721 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1722 const unsigned rt, const int is64,
1723 struct aarch64_insn_data *data)
1724 {
1725 struct aarch64_insn_relocation_data *insn_reloc
1726 = (struct aarch64_insn_relocation_data *) data;
1727 CORE_ADDR address = data->insn_addr + offset;
1728
1729 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1730 aarch64_register (rt, 1), address);
1731
1732 /* We know exactly what address to load from, and what register we
1733 can use:
1734
1735 MOV xd, #(oldloc + offset)
1736 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1737 ...
1738
1739 LDR xd, [xd] ; or LDRSW xd, [xd]
1740
1741 */
1742
1743 if (is_sw)
1744 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1745 aarch64_register (rt, 1),
1746 aarch64_register (rt, 1),
1747 offset_memory_operand (0));
1748 else
1749 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1750 aarch64_register (rt, is64),
1751 aarch64_register (rt, 1),
1752 offset_memory_operand (0));
1753 }
1754
1755 /* Implementation of aarch64_insn_visitor method "others". */
1756
1757 static void
1758 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1759 struct aarch64_insn_data *data)
1760 {
1761 struct aarch64_insn_relocation_data *insn_reloc
1762 = (struct aarch64_insn_relocation_data *) data;
1763
1764 /* The instruction is not PC relative. Just re-emit it at the new
1765 location. */
1766 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1767 }
1768
1769 static const struct aarch64_insn_visitor visitor =
1770 {
1771 aarch64_ftrace_insn_reloc_b,
1772 aarch64_ftrace_insn_reloc_b_cond,
1773 aarch64_ftrace_insn_reloc_cb,
1774 aarch64_ftrace_insn_reloc_tb,
1775 aarch64_ftrace_insn_reloc_adr,
1776 aarch64_ftrace_insn_reloc_ldr_literal,
1777 aarch64_ftrace_insn_reloc_others,
1778 };
1779
1780 /* Implementation of linux_target_ops method
1781 "install_fast_tracepoint_jump_pad". */
1782
1783 static int
1784 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1785 CORE_ADDR tpaddr,
1786 CORE_ADDR collector,
1787 CORE_ADDR lockaddr,
1788 ULONGEST orig_size,
1789 CORE_ADDR *jump_entry,
1790 CORE_ADDR *trampoline,
1791 ULONGEST *trampoline_size,
1792 unsigned char *jjump_pad_insn,
1793 ULONGEST *jjump_pad_insn_size,
1794 CORE_ADDR *adjusted_insn_addr,
1795 CORE_ADDR *adjusted_insn_addr_end,
1796 char *err)
1797 {
1798 uint32_t buf[256];
1799 uint32_t *p = buf;
1800 int64_t offset;
1801 int i;
1802 uint32_t insn;
1803 CORE_ADDR buildaddr = *jump_entry;
1804 struct aarch64_insn_relocation_data insn_data;
1805
1806 /* We need to save the current state on the stack both to restore it
1807 later and to collect register values when the tracepoint is hit.
1808
1809 The saved registers are pushed in a layout that needs to be in sync
1810 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1811 the supply_fast_tracepoint_registers function will fill in the
1812 register cache from a pointer to saved registers on the stack we build
1813 here.
1814
1815 For simplicity, we set the size of each cell on the stack to 16 bytes.
1816 This way one cell can hold any register type, from system registers
1817 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1818 has to be 16 bytes aligned anyway.
1819
1820 Note that the CPSR register does not exist on AArch64. Instead we
1821 can access system bits describing the process state with the
1822 MRS/MSR instructions, namely the condition flags. We save them as
1823 if they are part of a CPSR register because that's how GDB
1824 interprets these system bits. At the moment, only the condition
1825 flags are saved in CPSR (NZCV).
1826
1827 Stack layout, each cell is 16 bytes (descending):
1828
1829 High *-------- SIMD&FP registers from 31 down to 0. --------*
1830 | q31 |
1831 . .
1832 . . 32 cells
1833 . .
1834 | q0 |
1835 *---- General purpose registers from 30 down to 0. ----*
1836 | x30 |
1837 . .
1838 . . 31 cells
1839 . .
1840 | x0 |
1841 *------------- Special purpose registers. -------------*
1842 | SP |
1843 | PC |
1844 | CPSR (NZCV) | 5 cells
1845 | FPSR |
1846 | FPCR | <- SP + 16
1847 *------------- collecting_t object --------------------*
1848 | TPIDR_EL0 | struct tracepoint * |
1849 Low *------------------------------------------------------*
1850
1851 After this stack is set up, we issue a call to the collector, passing
1852 it the saved registers at (SP + 16). */
1853
1854 /* Push SIMD&FP registers on the stack:
1855
1856 SUB sp, sp, #(32 * 16)
1857
1858 STP q30, q31, [sp, #(30 * 16)]
1859 ...
1860 STP q0, q1, [sp]
1861
1862 */
1863 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1864 for (i = 30; i >= 0; i -= 2)
1865 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1866
1867 /* Push general puspose registers on the stack. Note that we do not need
1868 to push x31 as it represents the xzr register and not the stack
1869 pointer in a STR instruction.
1870
1871 SUB sp, sp, #(31 * 16)
1872
1873 STR x30, [sp, #(30 * 16)]
1874 ...
1875 STR x0, [sp]
1876
1877 */
1878 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1879 for (i = 30; i >= 0; i -= 1)
1880 p += emit_str (p, aarch64_register (i, 1), sp,
1881 offset_memory_operand (i * 16));
1882
1883 /* Make space for 5 more cells.
1884
1885 SUB sp, sp, #(5 * 16)
1886
1887 */
1888 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1889
1890
1891 /* Save SP:
1892
1893 ADD x4, sp, #((32 + 31 + 5) * 16)
1894 STR x4, [sp, #(4 * 16)]
1895
1896 */
1897 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1898 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1899
1900 /* Save PC (tracepoint address):
1901
1902 MOV x3, #(tpaddr)
1903 ...
1904
1905 STR x3, [sp, #(3 * 16)]
1906
1907 */
1908
1909 p += emit_mov_addr (p, x3, tpaddr);
1910 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1911
1912 /* Save CPSR (NZCV), FPSR and FPCR:
1913
1914 MRS x2, nzcv
1915 MRS x1, fpsr
1916 MRS x0, fpcr
1917
1918 STR x2, [sp, #(2 * 16)]
1919 STR x1, [sp, #(1 * 16)]
1920 STR x0, [sp, #(0 * 16)]
1921
1922 */
1923 p += emit_mrs (p, x2, NZCV);
1924 p += emit_mrs (p, x1, FPSR);
1925 p += emit_mrs (p, x0, FPCR);
1926 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1927 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1928 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1929
1930 /* Push the collecting_t object. It consists of the address of the
1931 tracepoint and an ID for the current thread. We get the latter by
1932 reading the tpidr_el0 system register. It corresponds to the
1933 NT_ARM_TLS register accessible with ptrace.
1934
1935 MOV x0, #(tpoint)
1936 ...
1937
1938 MRS x1, tpidr_el0
1939
1940 STP x0, x1, [sp, #-16]!
1941
1942 */
1943
1944 p += emit_mov_addr (p, x0, tpoint);
1945 p += emit_mrs (p, x1, TPIDR_EL0);
1946 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1947
1948 /* Spin-lock:
1949
1950 The shared memory for the lock is at lockaddr. It will hold zero
1951 if no-one is holding the lock, otherwise it contains the address of
1952 the collecting_t object on the stack of the thread which acquired it.
1953
1954 At this stage, the stack pointer points to this thread's collecting_t
1955 object.
1956
1957 We use the following registers:
1958 - x0: Address of the lock.
1959 - x1: Pointer to collecting_t object.
1960 - x2: Scratch register.
1961
1962 MOV x0, #(lockaddr)
1963 ...
1964 MOV x1, sp
1965
1966 ; Trigger an event local to this core. So the following WFE
1967 ; instruction is ignored.
1968 SEVL
1969 again:
1970 ; Wait for an event. The event is triggered by either the SEVL
1971 ; or STLR instructions (store release).
1972 WFE
1973
1974 ; Atomically read at lockaddr. This marks the memory location as
1975 ; exclusive. This instruction also has memory constraints which
1976 ; make sure all previous data reads and writes are done before
1977 ; executing it.
1978 LDAXR x2, [x0]
1979
1980 ; Try again if another thread holds the lock.
1981 CBNZ x2, again
1982
1983 ; We can lock it! Write the address of the collecting_t object.
1984 ; This instruction will fail if the memory location is not marked
1985 ; as exclusive anymore. If it succeeds, it will remove the
1986 ; exclusive mark on the memory location. This way, if another
1987 ; thread executes this instruction before us, we will fail and try
1988 ; all over again.
1989 STXR w2, x1, [x0]
1990 CBNZ w2, again
1991
1992 */
1993
1994 p += emit_mov_addr (p, x0, lockaddr);
1995 p += emit_mov (p, x1, register_operand (sp));
1996
1997 p += emit_sevl (p);
1998 p += emit_wfe (p);
1999 p += emit_ldaxr (p, x2, x0);
2000 p += emit_cb (p, 1, x2, -2 * 4);
2001 p += emit_stxr (p, w2, x1, x0);
2002 p += emit_cb (p, 1, w2, -4 * 4);
2003
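/* For intuition only: assuming the lock word at lockaddr is an
   _Atomic uintptr_t that holds zero when free, the LDAXR/STXR loop
   emitted above behaves roughly like the following C11 sketch
   (illustrative, not part of the generated code; collecting_sp
   stands for this thread's collecting_t object on the stack):

     uintptr_t expected;
     do
       expected = 0;
     while (!atomic_compare_exchange_weak_explicit
              (&lock, &expected, (uintptr_t) collecting_sp,
               memory_order_acquire, memory_order_relaxed));

   with the SEVL/WFE pair acting as a polite busy-wait between
   attempts.  */
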
2004 /* Call collector (struct tracepoint *, unsigned char *):
2005
2006 MOV x0, #(tpoint)
2007 ...
2008
2009 ; Saved registers start after the collecting_t object.
2010 ADD x1, sp, #16
2011
2012 ; We use an intra-procedure-call scratch register.
2013 MOV ip0, #(collector)
2014 ...
2015
2016 ; And call back to C!
2017 BLR ip0
2018
2019 */
2020
2021 p += emit_mov_addr (p, x0, tpoint);
2022 p += emit_add (p, x1, sp, immediate_operand (16));
2023
2024 p += emit_mov_addr (p, ip0, collector);
2025 p += emit_blr (p, ip0);
2026
2027 /* Release the lock.
2028
2029 MOV x0, #(lockaddr)
2030 ...
2031
2032 ; This instruction is a normal store with memory ordering
2033 ; constraints. Thanks to this we do not have to put a data
2034 ; barrier instruction to make sure all data reads and writes are done
2035 ; before this instruction is executed. Furthermore, this instruction
2036 ; will trigger an event, letting other threads know they can grab
2037 ; the lock.
2038 STLR xzr, [x0]
2039
2040 */
2041 p += emit_mov_addr (p, x0, lockaddr);
2042 p += emit_stlr (p, xzr, x0);
2043
2044 /* Free collecting_t object:
2045
2046 ADD sp, sp, #16
2047
2048 */
2049 p += emit_add (p, sp, sp, immediate_operand (16));
2050
2051 /* Restore CPSR (NZCV), FPSR and FPCR, then pop all special purpose
2052 registers off the stack.
2053
2054 LDR x2, [sp, #(2 * 16)]
2055 LDR x1, [sp, #(1 * 16)]
2056 LDR x0, [sp, #(0 * 16)]
2057
2058 MSR NZCV, x2
2059 MSR FPSR, x1
2060 MSR FPCR, x0
2061
2062 ADD sp, sp, #(5 * 16)
2063
2064 */
2065 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2066 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2067 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2068 p += emit_msr (p, NZCV, x2);
2069 p += emit_msr (p, FPSR, x1);
2070 p += emit_msr (p, FPCR, x0);
2071
2072 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2073
2074 /* Pop general purpose registers:
2075
2076 LDR x0, [sp]
2077 ...
2078 LDR x30, [sp, #(30 * 16)]
2079
2080 ADD sp, sp, #(31 * 16)
2081
2082 */
2083 for (i = 0; i <= 30; i += 1)
2084 p += emit_ldr (p, aarch64_register (i, 1), sp,
2085 offset_memory_operand (i * 16));
2086 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2087
2088 /* Pop SIMD&FP registers:
2089
2090 LDP q0, q1, [sp]
2091 ...
2092 LDP q30, q31, [sp, #(30 * 16)]
2093
2094 ADD sp, sp, #(32 * 16)
2095
2096 */
2097 for (i = 0; i <= 30; i += 2)
2098 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2099 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2100
2101 /* Write the code into the inferior memory. */
2102 append_insns (&buildaddr, p - buf, buf);
2103
2104 /* Now emit the relocated instruction. */
2105 *adjusted_insn_addr = buildaddr;
2106 target_read_uint32 (tpaddr, &insn);
2107
2108 insn_data.base.insn_addr = tpaddr;
2109 insn_data.new_addr = buildaddr;
2110 insn_data.insn_ptr = buf;
2111
2112 aarch64_relocate_instruction (insn, &visitor,
2113 (struct aarch64_insn_data *) &insn_data);
2114
2115 /* We may not have been able to relocate the instruction. */
2116 if (insn_data.insn_ptr == buf)
2117 {
2118 sprintf (err,
2119 "E.Could not relocate instruction from %s to %s.",
2120 core_addr_to_string_nz (tpaddr),
2121 core_addr_to_string_nz (buildaddr));
2122 return 1;
2123 }
2124 else
2125 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2126 *adjusted_insn_addr_end = buildaddr;
2127
2128 /* Go back to the start of the buffer. */
2129 p = buf;
2130
2131 /* Emit a branch back from the jump pad. */
2132 offset = (tpaddr + orig_size - buildaddr);
2133 if (!can_encode_int32 (offset, 28))
2134 {
2135 sprintf (err,
2136 "E.Jump back from jump pad too far from tracepoint "
2137 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2138 offset);
2139 return 1;
2140 }
2141
2142 p += emit_b (p, 0, offset);
2143 append_insns (&buildaddr, p - buf, buf);
2144
2145 /* Give the caller a branch instruction into the jump pad. */
2146 offset = (*jump_entry - tpaddr);
2147 if (!can_encode_int32 (offset, 28))
2148 {
2149 sprintf (err,
2150 "E.Jump pad too far from tracepoint "
2151 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2152 offset);
2153 return 1;
2154 }
2155
2156 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2157 *jjump_pad_insn_size = 4;
2158
2159 /* Return the end address of our pad. */
2160 *jump_entry = buildaddr;
2161
2162 return 0;
2163 }
2164
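/* To recap the pad built by the function above:

     <register save code, collector call, register restore code>
     <relocated copy of the instruction at TPADDR>
     B <back to TPADDR + orig_size>

   The tracepoint site itself is overwritten with a single B into the
   pad, which is why both branch offsets must fit in the 28 bits of
   byte offset (a 26-bit immediate scaled by 4) that the AArch64 B
   instruction can encode, i.e. within +/-128 MiB.  */
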
2165 /* Helper function writing LEN instructions from START into
2166 current_insn_ptr. */
2167
2168 static void
2169 emit_ops_insns (const uint32_t *start, int len)
2170 {
2171 CORE_ADDR buildaddr = current_insn_ptr;
2172
2173 if (debug_threads)
2174 debug_printf ("Adding %d instrucions at %s\n",
2175 len, paddress (buildaddr));
2176
2177 append_insns (&buildaddr, len, start);
2178 current_insn_ptr = buildaddr;
2179 }
2180
2181 /* Pop a register from the stack. */
2182
2183 static int
2184 emit_pop (uint32_t *buf, struct aarch64_register rt)
2185 {
2186 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2187 }
2188
2189 /* Push a register on the stack. */
2190
2191 static int
2192 emit_push (uint32_t *buf, struct aarch64_register rt)
2193 {
2194 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2195 }
2196
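/* Although emit_pop and emit_push above only transfer 8 bytes, they
   move SP in 16-byte steps: AArch64 requires the stack pointer to
   stay 16-byte aligned whenever it is used as the base register of a
   memory access, so every slot of this stack machine is padded to 16
   bytes.  */
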
2197 /* Implementation of emit_ops method "emit_prologue". */
2198
2199 static void
2200 aarch64_emit_prologue (void)
2201 {
2202 uint32_t buf[16];
2203 uint32_t *p = buf;
2204
2205 /* This function emits a prologue for the following function prototype:
2206
2207 enum eval_result_type f (unsigned char *regs,
2208 ULONGEST *value);
2209
2210 The first argument is a buffer of raw registers. The second
2211 argument points to the result of evaluating the expression;
2212 it will be set to whatever is on top of the stack at the
2213 end.
2214
2215 The stack set up by the prologue is as follows:
2216
2217 High *------------------------------------------------------*
2218 | LR |
2219 | FP | <- FP
2220 | x1 (ULONGEST *value) |
2221 | x0 (unsigned char *regs) |
2222 Low *------------------------------------------------------*
2223
2224 As we are implementing a stack machine, each opcode can expand the
2225 stack so we never know how far we are from the data saved by this
2226 prologue. In order to be able to refer to value and regs later, we save
2227 the current stack pointer in the frame pointer. This way, it is not
2228 clobbered when calling C functions.
2229
2230 Finally, throughout every operation, we are using register x0 as the
2231 top of the stack, and x1 as a scratch register. */
2232
2233 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2234 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2235 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2236
2237 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2238
2239
2240 emit_ops_insns (buf, p - buf);
2241 }
2242
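/* For reference, the prologue above assembles to something like:

     STP x0, x1, [sp, #-32]!
     STR x30, [sp, #24]
     STR x29, [sp, #16]
     ADD x29, sp, #16

   leaving the frame pointer aimed at the saved FP cell, as in the
   diagram.  */
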
2243 /* Implementation of emit_ops method "emit_epilogue". */
2244
2245 static void
2246 aarch64_emit_epilogue (void)
2247 {
2248 uint32_t buf[16];
2249 uint32_t *p = buf;
2250
2251 /* Store the result of the expression (x0) in *value. */
2252 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2253 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2254 p += emit_str (p, x0, x1, offset_memory_operand (0));
2255
2256 /* Restore the previous state. */
2257 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2258 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2259
2260 /* Return expr_eval_no_error. */
2261 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2262 p += emit_ret (p, lr);
2263
2264 emit_ops_insns (buf, p - buf);
2265 }
2266
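/* Likewise, the epilogue above is roughly:

     SUB x1, x29, #8       ; x1 = address of the saved value pointer
     LDR x1, [x1]
     STR x0, [x1]          ; *value = top of stack
     ADD sp, x29, #16      ; restore the SP from function entry
     LDP x29, x30, [x29]
     MOV x0, #expr_eval_no_error
     RET  */
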
2267 /* Implementation of emit_ops method "emit_add". */
2268
2269 static void
2270 aarch64_emit_add (void)
2271 {
2272 uint32_t buf[16];
2273 uint32_t *p = buf;
2274
2275 p += emit_pop (p, x1);
2276 p += emit_add (p, x0, x1, register_operand (x0));
2277
2278 emit_ops_insns (buf, p - buf);
2279 }
2280
2281 /* Implementation of emit_ops method "emit_sub". */
2282
2283 static void
2284 aarch64_emit_sub (void)
2285 {
2286 uint32_t buf[16];
2287 uint32_t *p = buf;
2288
2289 p += emit_pop (p, x1);
2290 p += emit_sub (p, x0, x1, register_operand (x0));
2291
2292 emit_ops_insns (buf, p - buf);
2293 }
2294
2295 /* Implementation of emit_ops method "emit_mul". */
2296
2297 static void
2298 aarch64_emit_mul (void)
2299 {
2300 uint32_t buf[16];
2301 uint32_t *p = buf;
2302
2303 p += emit_pop (p, x1);
2304 p += emit_mul (p, x0, x1, x0);
2305
2306 emit_ops_insns (buf, p - buf);
2307 }
2308
2309 /* Implementation of emit_ops method "emit_lsh". */
2310
2311 static void
2312 aarch64_emit_lsh (void)
2313 {
2314 uint32_t buf[16];
2315 uint32_t *p = buf;
2316
2317 p += emit_pop (p, x1);
2318 p += emit_lslv (p, x0, x1, x0);
2319
2320 emit_ops_insns (buf, p - buf);
2321 }
2322
2323 /* Implementation of emit_ops method "emit_rsh_signed". */
2324
2325 static void
2326 aarch64_emit_rsh_signed (void)
2327 {
2328 uint32_t buf[16];
2329 uint32_t *p = buf;
2330
2331 p += emit_pop (p, x1);
2332 p += emit_asrv (p, x0, x1, x0);
2333
2334 emit_ops_insns (buf, p - buf);
2335 }
2336
2337 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2338
2339 static void
2340 aarch64_emit_rsh_unsigned (void)
2341 {
2342 uint32_t buf[16];
2343 uint32_t *p = buf;
2344
2345 p += emit_pop (p, x1);
2346 p += emit_lsrv (p, x0, x1, x0);
2347
2348 emit_ops_insns (buf, p - buf);
2349 }
2350
2351 /* Implementation of emit_ops method "emit_ext". */
2352
2353 static void
2354 aarch64_emit_ext (int arg)
2355 {
2356 uint32_t buf[16];
2357 uint32_t *p = buf;
2358
2359 p += emit_sbfx (p, x0, x0, 0, arg);
2360
2361 emit_ops_insns (buf, p - buf);
2362 }
2363
2364 /* Implementation of emit_ops method "emit_log_not". */
2365
2366 static void
2367 aarch64_emit_log_not (void)
2368 {
2369 uint32_t buf[16];
2370 uint32_t *p = buf;
2371
2372 /* If the top of the stack is 0, replace it with 1. Else replace it with
2373 0. */
2374
2375 p += emit_cmp (p, x0, immediate_operand (0));
2376 p += emit_cset (p, x0, EQ);
2377
2378 emit_ops_insns (buf, p - buf);
2379 }
2380
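/* CSET writes 1 to its destination when the condition holds and 0
   otherwise, so the CMP x0, #0 followed by CSET x0, EQ emitted above
   is exactly logical negation of the boolean-ized top of stack.  */
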
2381 /* Implementation of emit_ops method "emit_bit_and". */
2382
2383 static void
2384 aarch64_emit_bit_and (void)
2385 {
2386 uint32_t buf[16];
2387 uint32_t *p = buf;
2388
2389 p += emit_pop (p, x1);
2390 p += emit_and (p, x0, x0, x1);
2391
2392 emit_ops_insns (buf, p - buf);
2393 }
2394
2395 /* Implementation of emit_ops method "emit_bit_or". */
2396
2397 static void
2398 aarch64_emit_bit_or (void)
2399 {
2400 uint32_t buf[16];
2401 uint32_t *p = buf;
2402
2403 p += emit_pop (p, x1);
2404 p += emit_orr (p, x0, x0, x1);
2405
2406 emit_ops_insns (buf, p - buf);
2407 }
2408
2409 /* Implementation of emit_ops method "emit_bit_xor". */
2410
2411 static void
2412 aarch64_emit_bit_xor (void)
2413 {
2414 uint32_t buf[16];
2415 uint32_t *p = buf;
2416
2417 p += emit_pop (p, x1);
2418 p += emit_eor (p, x0, x0, x1);
2419
2420 emit_ops_insns (buf, p - buf);
2421 }
2422
2423 /* Implementation of emit_ops method "emit_bit_not". */
2424
2425 static void
2426 aarch64_emit_bit_not (void)
2427 {
2428 uint32_t buf[16];
2429 uint32_t *p = buf;
2430
2431 p += emit_mvn (p, x0, x0);
2432
2433 emit_ops_insns (buf, p - buf);
2434 }
2435
2436 /* Implementation of emit_ops method "emit_equal". */
2437
2438 static void
2439 aarch64_emit_equal (void)
2440 {
2441 uint32_t buf[16];
2442 uint32_t *p = buf;
2443
2444 p += emit_pop (p, x1);
2445 p += emit_cmp (p, x0, register_operand (x1));
2446 p += emit_cset (p, x0, EQ);
2447
2448 emit_ops_insns (buf, p - buf);
2449 }
2450
2451 /* Implementation of emit_ops method "emit_less_signed". */
2452
2453 static void
2454 aarch64_emit_less_signed (void)
2455 {
2456 uint32_t buf[16];
2457 uint32_t *p = buf;
2458
2459 p += emit_pop (p, x1);
2460 p += emit_cmp (p, x1, register_operand (x0));
2461 p += emit_cset (p, x0, LT);
2462
2463 emit_ops_insns (buf, p - buf);
2464 }
2465
2466 /* Implementation of emit_ops method "emit_less_unsigned". */
2467
2468 static void
2469 aarch64_emit_less_unsigned (void)
2470 {
2471 uint32_t buf[16];
2472 uint32_t *p = buf;
2473
2474 p += emit_pop (p, x1);
2475 p += emit_cmp (p, x1, register_operand (x0));
2476 p += emit_cset (p, x0, LO);
2477
2478 emit_ops_insns (buf, p - buf);
2479 }
2480
2481 /* Implementation of emit_ops method "emit_ref". */
2482
2483 static void
2484 aarch64_emit_ref (int size)
2485 {
2486 uint32_t buf[16];
2487 uint32_t *p = buf;
2488
2489 switch (size)
2490 {
2491 case 1:
2492 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2493 break;
2494 case 2:
2495 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2496 break;
2497 case 4:
2498 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2499 break;
2500 case 8:
2501 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2502 break;
2503 default:
2504 /* Unknown size, bail on compilation. */
2505 emit_error = 1;
2506 break;
2507 }
2508
2509 emit_ops_insns (buf, p - buf);
2510 }
2511
2512 /* Implementation of emit_ops method "emit_if_goto". */
2513
2514 static void
2515 aarch64_emit_if_goto (int *offset_p, int *size_p)
2516 {
2517 uint32_t buf[16];
2518 uint32_t *p = buf;
2519
2520 /* The Z flag is set or cleared here. */
2521 p += emit_cmp (p, x0, immediate_operand (0));
2522 /* This instruction must not change the Z flag. */
2523 p += emit_pop (p, x0);
2524 /* Branch over the next instruction if x0 == 0. */
2525 p += emit_bcond (p, EQ, 8);
2526
2527 /* The NOP instruction will be patched with an unconditional branch. */
2528 if (offset_p)
2529 *offset_p = (p - buf) * 4;
2530 if (size_p)
2531 *size_p = 4;
2532 p += emit_nop (p);
2533
2534 emit_ops_insns (buf, p - buf);
2535 }
2536
2537 /* Implementation of emit_ops method "emit_goto". */
2538
2539 static void
2540 aarch64_emit_goto (int *offset_p, int *size_p)
2541 {
2542 uint32_t buf[16];
2543 uint32_t *p = buf;
2544
2545 /* The NOP instruction will be patched with an unconditional branch. */
2546 if (offset_p)
2547 *offset_p = 0;
2548 if (size_p)
2549 *size_p = 4;
2550 p += emit_nop (p);
2551
2552 emit_ops_insns (buf, p - buf);
2553 }
2554
2555 /* Implementation of emit_ops method "write_goto_address". */
2556
2557 static void
2558 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2559 {
2560 uint32_t insn;
2561
2562 emit_b (&insn, 0, to - from);
2563 append_insns (&from, 1, &insn);
2564 }
2565
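/* FROM is the address that one of the emit_*_goto methods above
   reported through its offset_p/size_p out-parameters: each of them
   leaves a 4-byte NOP and records its position, and once the branch
   target is known this function overwrites that NOP with a B
   instruction.  B carries a signed 26-bit word offset, so TO must
   lie within +/-128 MiB of FROM.  */
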
2566 /* Implementation of emit_ops method "emit_const". */
2567
2568 static void
2569 aarch64_emit_const (LONGEST num)
2570 {
2571 uint32_t buf[16];
2572 uint32_t *p = buf;
2573
2574 p += emit_mov_addr (p, x0, num);
2575
2576 emit_ops_insns (buf, p - buf);
2577 }
2578
2579 /* Implementation of emit_ops method "emit_call". */
2580
2581 static void
2582 aarch64_emit_call (CORE_ADDR fn)
2583 {
2584 uint32_t buf[16];
2585 uint32_t *p = buf;
2586
2587 p += emit_mov_addr (p, ip0, fn);
2588 p += emit_blr (p, ip0);
2589
2590 emit_ops_insns (buf, p - buf);
2591 }
2592
2593 /* Implementation of emit_ops method "emit_reg". */
2594
2595 static void
2596 aarch64_emit_reg (int reg)
2597 {
2598 uint32_t buf[16];
2599 uint32_t *p = buf;
2600
2601 /* Set x0 to unsigned char *regs. */
2602 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2603 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2604 p += emit_mov (p, x1, immediate_operand (reg));
2605
2606 emit_ops_insns (buf, p - buf);
2607
2608 aarch64_emit_call (get_raw_reg_func_addr ());
2609 }
2610
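/* The call above follows the AArch64 procedure call standard: x0
   carries the raw register buffer saved by the prologue, x1 the
   register number, and the helper's result comes back in x0, which
   is where this stack machine keeps its top-of-stack element.  */
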
2611 /* Implementation of emit_ops method "emit_pop". */
2612
2613 static void
2614 aarch64_emit_pop (void)
2615 {
2616 uint32_t buf[16];
2617 uint32_t *p = buf;
2618
2619 p += emit_pop (p, x0);
2620
2621 emit_ops_insns (buf, p - buf);
2622 }
2623
2624 /* Implementation of emit_ops method "emit_stack_flush". */
2625
2626 static void
2627 aarch64_emit_stack_flush (void)
2628 {
2629 uint32_t buf[16];
2630 uint32_t *p = buf;
2631
2632 p += emit_push (p, x0);
2633
2634 emit_ops_insns (buf, p - buf);
2635 }
2636
2637 /* Implementation of emit_ops method "emit_zero_ext". */
2638
2639 static void
2640 aarch64_emit_zero_ext (int arg)
2641 {
2642 uint32_t buf[16];
2643 uint32_t *p = buf;
2644
2645 p += emit_ubfx (p, x0, x0, 0, arg);
2646
2647 emit_ops_insns (buf, p - buf);
2648 }
2649
2650 /* Implementation of emit_ops method "emit_swap". */
2651
2652 static void
2653 aarch64_emit_swap (void)
2654 {
2655 uint32_t buf[16];
2656 uint32_t *p = buf;
2657
2658 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2659 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2660 p += emit_mov (p, x0, register_operand (x1));
2661
2662 emit_ops_insns (buf, p - buf);
2663 }
2664
2665 /* Implementation of emit_ops method "emit_stack_adjust". */
2666
2667 static void
2668 aarch64_emit_stack_adjust (int n)
2669 {
2670 /* This is not needed with our design. */
2671 uint32_t buf[16];
2672 uint32_t *p = buf;
2673
2674 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2675
2676 emit_ops_insns (buf, p - buf);
2677 }
2678
2679 /* Implementation of emit_ops method "emit_int_call_1". */
2680
2681 static void
2682 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2683 {
2684 uint32_t buf[16];
2685 uint32_t *p = buf;
2686
2687 p += emit_mov (p, x0, immediate_operand (arg1));
2688
2689 emit_ops_insns (buf, p - buf);
2690
2691 aarch64_emit_call (fn);
2692 }
2693
2694 /* Implementation of emit_ops method "emit_void_call_2". */
2695
2696 static void
2697 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2698 {
2699 uint32_t buf[16];
2700 uint32_t *p = buf;
2701
2702 /* Push x0 on the stack. */
2703 aarch64_emit_stack_flush ();
2704
2705 /* Set up arguments for the function call:
2706
2707 x0: arg1
2708 x1: top of the stack
2709
2710 MOV x1, x0
2711 MOV x0, #arg1 */
2712
2713 p += emit_mov (p, x1, register_operand (x0));
2714 p += emit_mov (p, x0, immediate_operand (arg1));
2715
2716 emit_ops_insns (buf, p - buf);
2717
2718 aarch64_emit_call (fn);
2719
2720 /* Restore x0. */
2721 aarch64_emit_pop ();
2722 }
2723
2724 /* Implementation of emit_ops method "emit_eq_goto". */
2725
2726 static void
2727 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2728 {
2729 uint32_t buf[16];
2730 uint32_t *p = buf;
2731
2732 p += emit_pop (p, x1);
2733 p += emit_cmp (p, x1, register_operand (x0));
2734 /* Branch over the next instruction if x0 != x1. */
2735 p += emit_bcond (p, NE, 8);
2736 /* The NOP instruction will be patched with an unconditional branch. */
2737 if (offset_p)
2738 *offset_p = (p - buf) * 4;
2739 if (size_p)
2740 *size_p = 4;
2741 p += emit_nop (p);
2742
2743 emit_ops_insns (buf, p - buf);
2744 }
2745
2746 /* Implementation of emit_ops method "emit_ne_goto". */
2747
2748 static void
2749 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2750 {
2751 uint32_t buf[16];
2752 uint32_t *p = buf;
2753
2754 p += emit_pop (p, x1);
2755 p += emit_cmp (p, x1, register_operand (x0));
2756 /* Branch over the next instruction if x0 == x1. */
2757 p += emit_bcond (p, EQ, 8);
2758 /* The NOP instruction will be patched with an unconditional branch. */
2759 if (offset_p)
2760 *offset_p = (p - buf) * 4;
2761 if (size_p)
2762 *size_p = 4;
2763 p += emit_nop (p);
2764
2765 emit_ops_insns (buf, p - buf);
2766 }
2767
2768 /* Implementation of emit_ops method "emit_lt_goto". */
2769
2770 static void
2771 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2772 {
2773 uint32_t buf[16];
2774 uint32_t *p = buf;
2775
2776 p += emit_pop (p, x1);
2777 p += emit_cmp (p, x1, register_operand (x0));
2778 /* Branch over the next instruction if x1 >= x0. */
2779 p += emit_bcond (p, GE, 8);
2780 /* The NOP instruction will be patched with an unconditional branch. */
2781 if (offset_p)
2782 *offset_p = (p - buf) * 4;
2783 if (size_p)
2784 *size_p = 4;
2785 p += emit_nop (p);
2786
2787 emit_ops_insns (buf, p - buf);
2788 }
2789
2790 /* Implementation of emit_ops method "emit_le_goto". */
2791
2792 static void
2793 aarch64_emit_le_goto (int *offset_p, int *size_p)
2794 {
2795 uint32_t buf[16];
2796 uint32_t *p = buf;
2797
2798 p += emit_pop (p, x1);
2799 p += emit_cmp (p, x1, register_operand (x0));
2800 /* Branch over the next instruction if x1 > x0. */
2801 p += emit_bcond (p, GT, 8);
2802 /* The NOP instruction will be patched with an unconditional branch. */
2803 if (offset_p)
2804 *offset_p = (p - buf) * 4;
2805 if (size_p)
2806 *size_p = 4;
2807 p += emit_nop (p);
2808
2809 emit_ops_insns (buf, p - buf);
2810 }
2811
2812 /* Implementation of emit_ops method "emit_gt_goto". */
2813
2814 static void
2815 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2816 {
2817 uint32_t buf[16];
2818 uint32_t *p = buf;
2819
2820 p += emit_pop (p, x1);
2821 p += emit_cmp (p, x1, register_operand (x0));
2822 /* Branch over the next instruction if x1 <= x0. */
2823 p += emit_bcond (p, LE, 8);
2824 /* The NOP instruction will be patched with an unconditional branch. */
2825 if (offset_p)
2826 *offset_p = (p - buf) * 4;
2827 if (size_p)
2828 *size_p = 4;
2829 p += emit_nop (p);
2830
2831 emit_ops_insns (buf, p - buf);
2832 }
2833
2834 /* Implementation of emit_ops method "emit_ge_got". */
2835
2836 static void
2837 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2838 {
2839 uint32_t buf[16];
2840 uint32_t *p = buf;
2841
2842 p += emit_pop (p, x1);
2843 p += emit_cmp (p, x1, register_operand (x0));
2844 /* Branch over the next instruction if x1 < x0. */
2845 p += emit_bcond (p, LT, 8);
2846 /* The NOP instruction will be patched with an unconditional branch. */
2847 if (offset_p)
2848 *offset_p = (p - buf) * 4;
2849 if (size_p)
2850 *size_p = 4;
2851 p += emit_nop (p);
2852
2853 emit_ops_insns (buf, p - buf);
2854 }
2855
2856 static struct emit_ops aarch64_emit_ops_impl =
2857 {
2858 aarch64_emit_prologue,
2859 aarch64_emit_epilogue,
2860 aarch64_emit_add,
2861 aarch64_emit_sub,
2862 aarch64_emit_mul,
2863 aarch64_emit_lsh,
2864 aarch64_emit_rsh_signed,
2865 aarch64_emit_rsh_unsigned,
2866 aarch64_emit_ext,
2867 aarch64_emit_log_not,
2868 aarch64_emit_bit_and,
2869 aarch64_emit_bit_or,
2870 aarch64_emit_bit_xor,
2871 aarch64_emit_bit_not,
2872 aarch64_emit_equal,
2873 aarch64_emit_less_signed,
2874 aarch64_emit_less_unsigned,
2875 aarch64_emit_ref,
2876 aarch64_emit_if_goto,
2877 aarch64_emit_goto,
2878 aarch64_write_goto_address,
2879 aarch64_emit_const,
2880 aarch64_emit_call,
2881 aarch64_emit_reg,
2882 aarch64_emit_pop,
2883 aarch64_emit_stack_flush,
2884 aarch64_emit_zero_ext,
2885 aarch64_emit_swap,
2886 aarch64_emit_stack_adjust,
2887 aarch64_emit_int_call_1,
2888 aarch64_emit_void_call_2,
2889 aarch64_emit_eq_goto,
2890 aarch64_emit_ne_goto,
2891 aarch64_emit_lt_goto,
2892 aarch64_emit_le_goto,
2893 aarch64_emit_gt_goto,
2894 aarch64_emit_ge_goto,
2895 };
2896
2897 /* Implementation of linux_target_ops method "emit_ops". */
2898
2899 static struct emit_ops *
2900 aarch64_emit_ops (void)
2901 {
2902 return &aarch64_emit_ops_impl;
2903 }
2904
2905 /* Implementation of linux_target_ops method
2906 "get_min_fast_tracepoint_insn_len". */
2907
2908 static int
2909 aarch64_get_min_fast_tracepoint_insn_len (void)
2910 {
2911 return 4;
2912 }
2913
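/* Four bytes is one instruction: AArch64 instructions are all 32
   bits wide and the jump pad is entered with a single B, so any
   instruction can be replaced by a fast tracepoint jump, provided
   the pad is within branch range.  */
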
2914 /* Implementation of linux_target_ops method "supports_range_stepping". */
2915
2916 static int
2917 aarch64_supports_range_stepping (void)
2918 {
2919 return 1;
2920 }
2921
2922 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2923
2924 static const gdb_byte *
2925 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2926 {
2927 if (is_64bit_tdesc ())
2928 {
2929 *size = aarch64_breakpoint_len;
2930 return aarch64_breakpoint;
2931 }
2932 else
2933 return arm_sw_breakpoint_from_kind (kind, size);
2934 }
2935
2936 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2937
2938 static int
2939 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2940 {
2941 if (is_64bit_tdesc ())
2942 return aarch64_breakpoint_len;
2943 else
2944 return arm_breakpoint_kind_from_pc (pcptr);
2945 }
2946
2947 /* Implementation of the linux_target_ops method
2948 "breakpoint_kind_from_current_state". */
2949
2950 static int
2951 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2952 {
2953 if (is_64bit_tdesc ())
2954 return aarch64_breakpoint_len;
2955 else
2956 return arm_breakpoint_kind_from_current_state (pcptr);
2957 }
2958
2959 /* Support for hardware single step. */
2960
2961 static int
2962 aarch64_supports_hardware_single_step (void)
2963 {
2964 return 1;
2965 }
2966
2967 struct linux_target_ops the_low_target =
2968 {
2969 aarch64_arch_setup,
2970 aarch64_regs_info,
2971 aarch64_cannot_fetch_register,
2972 aarch64_cannot_store_register,
2973 NULL, /* fetch_register */
2974 aarch64_get_pc,
2975 aarch64_set_pc,
2976 aarch64_breakpoint_kind_from_pc,
2977 aarch64_sw_breakpoint_from_kind,
2978 NULL, /* get_next_pcs */
2979 0, /* decr_pc_after_break */
2980 aarch64_breakpoint_at,
2981 aarch64_supports_z_point_type,
2982 aarch64_insert_point,
2983 aarch64_remove_point,
2984 aarch64_stopped_by_watchpoint,
2985 aarch64_stopped_data_address,
2986 NULL, /* collect_ptrace_register */
2987 NULL, /* supply_ptrace_register */
2988 aarch64_linux_siginfo_fixup,
2989 aarch64_linux_new_process,
2990 aarch64_linux_delete_process,
2991 aarch64_linux_new_thread,
2992 aarch64_linux_delete_thread,
2993 aarch64_linux_new_fork,
2994 aarch64_linux_prepare_to_resume,
2995 NULL, /* process_qsupported */
2996 aarch64_supports_tracepoints,
2997 aarch64_get_thread_area,
2998 aarch64_install_fast_tracepoint_jump_pad,
2999 aarch64_emit_ops,
3000 aarch64_get_min_fast_tracepoint_insn_len,
3001 aarch64_supports_range_stepping,
3002 aarch64_breakpoint_kind_from_current_state,
3003 aarch64_supports_hardware_single_step,
3004 aarch64_get_syscall_trapinfo,
3005 };
3006
3007 void
3008 initialize_low_arch (void)
3009 {
3010 init_registers_aarch64 ();
3011
3012 initialize_low_arch_aarch32 ();
3013
3014 initialize_regsets_info (&aarch64_regsets_info);
3015 }