linux low: Make the arch code free arch_process_info
gdb/gdbserver/linux-aarch64-low.c
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2017 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31
32 #include <signal.h>
33 #include <sys/user.h>
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
36 #include <inttypes.h>
37 #include <endian.h>
38 #include <sys/uio.h>
39
40 #include "gdb_proc_service.h"
41
42 /* Defined in auto-generated files. */
43 void init_registers_aarch64 (void);
44 extern const struct target_desc *tdesc_aarch64;
45
46 #ifdef HAVE_SYS_REG_H
47 #include <sys/reg.h>
48 #endif
49
50 #define AARCH64_X_REGS_NUM 31
51 #define AARCH64_V_REGS_NUM 32
52 #define AARCH64_X0_REGNO 0
53 #define AARCH64_SP_REGNO 31
54 #define AARCH64_PC_REGNO 32
55 #define AARCH64_CPSR_REGNO 33
56 #define AARCH64_V0_REGNO 34
57 #define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
58 #define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)
59
60 #define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
61
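/* (With 31 X registers plus SP, PC and CPSR, then 32 V registers plus
   FPSR and FPCR, the numbering above works out to register numbers
   0 .. 67, i.e. AARCH64_NUM_REGS == 68.)  */
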
62 /* Per-process arch-specific data we want to keep. */
63
64 struct arch_process_info
65 {
66 /* Hardware breakpoint/watchpoint data.
67 The reason for them to be per-process rather than per-thread is
68 the lack of information in the gdbserver environment; gdbserver
69 is not told whether a requested hardware breakpoint/watchpoint
70 is thread-specific, so it has to set each hw bp/wp for every
71 thread in the current process. The higher-level bp/wp management
72 in gdb will resume a thread if a hw bp/wp trap is not expected
73 for it. Since the hw bp/wp setting is the same for every thread,
74 it is reasonable for the data to live here.
75 */
76 struct aarch64_debug_reg_state debug_reg_state;
77 };
78
79 /* Return true if the size of register 0 is 8 bytes. */
80
81 static int
82 is_64bit_tdesc (void)
83 {
84 struct regcache *regcache = get_thread_regcache (current_thread, 0);
85
86 return register_size (regcache->tdesc, 0) == 8;
87 }
88
89 /* Implementation of linux_target_ops method "cannot_store_register". */
90
91 static int
92 aarch64_cannot_store_register (int regno)
93 {
94 return regno >= AARCH64_NUM_REGS;
95 }
96
97 /* Implementation of linux_target_ops method "cannot_fetch_register". */
98
99 static int
100 aarch64_cannot_fetch_register (int regno)
101 {
102 return regno >= AARCH64_NUM_REGS;
103 }
104
105 static void
106 aarch64_fill_gregset (struct regcache *regcache, void *buf)
107 {
108 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
109 int i;
110
111 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
112 collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
113 collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
114 collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
115 collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
116 }
117
118 static void
119 aarch64_store_gregset (struct regcache *regcache, const void *buf)
120 {
121 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
122 int i;
123
124 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
125 supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
126 supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
127 supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
128 supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
129 }
130
131 static void
132 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
133 {
134 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
135 int i;
136
137 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
138 collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
139 collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
140 collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
141 }
142
143 static void
144 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
145 {
146 const struct user_fpsimd_state *regset
147 = (const struct user_fpsimd_state *) buf;
148 int i;
149
150 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
151 supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
152 supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
153 supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
154 }
155
156 /* Enable miscellaneous debugging output. The name is historical - it
157 was originally used to debug LinuxThreads support. */
158 extern int debug_threads;
159
160 /* Implementation of linux_target_ops method "get_pc". */
161
162 static CORE_ADDR
163 aarch64_get_pc (struct regcache *regcache)
164 {
165 if (register_size (regcache->tdesc, 0) == 8)
166 return linux_get_pc_64bit (regcache);
167 else
168 return linux_get_pc_32bit (regcache);
169 }
170
171 /* Implementation of linux_target_ops method "set_pc". */
172
173 static void
174 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
175 {
176 if (register_size (regcache->tdesc, 0) == 8)
177 linux_set_pc_64bit (regcache, pc);
178 else
179 linux_set_pc_32bit (regcache, pc);
180 }
181
182 #define aarch64_breakpoint_len 4
183
184 /* AArch64 BRK software debug mode instruction.
185 This instruction needs to match gdb/aarch64-tdep.c
186 (aarch64_default_breakpoint). */
187 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
188
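/* As AArch64 instructions are always stored little-endian, these four
   bytes form the instruction word 0xd4200000, i.e. BRK #0.  */
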
189 /* Implementation of linux_target_ops method "breakpoint_at". */
190
191 static int
192 aarch64_breakpoint_at (CORE_ADDR where)
193 {
194 if (is_64bit_tdesc ())
195 {
196 gdb_byte insn[aarch64_breakpoint_len];
197
198 (*the_target->read_memory) (where, (unsigned char *) &insn,
199 aarch64_breakpoint_len);
200 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
201 return 1;
202
203 return 0;
204 }
205 else
206 return arm_breakpoint_at (where);
207 }
208
209 static void
210 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
211 {
212 int i;
213
214 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
215 {
216 state->dr_addr_bp[i] = 0;
217 state->dr_ctrl_bp[i] = 0;
218 state->dr_ref_count_bp[i] = 0;
219 }
220
221 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
222 {
223 state->dr_addr_wp[i] = 0;
224 state->dr_ctrl_wp[i] = 0;
225 state->dr_ref_count_wp[i] = 0;
226 }
227 }
228
229 /* Return the pointer to the debug register state structure in the
230 current process' arch-specific data area. */
231
232 struct aarch64_debug_reg_state *
233 aarch64_get_debug_reg_state (pid_t pid)
234 {
235 struct process_info *proc = find_process_pid (pid);
236
237 return &proc->priv->arch_private->debug_reg_state;
238 }
239
240 /* Implementation of linux_target_ops method "supports_z_point_type". */
241
242 static int
243 aarch64_supports_z_point_type (char z_type)
244 {
245 switch (z_type)
246 {
247 case Z_PACKET_SW_BP:
248 case Z_PACKET_HW_BP:
249 case Z_PACKET_WRITE_WP:
250 case Z_PACKET_READ_WP:
251 case Z_PACKET_ACCESS_WP:
252 return 1;
253 default:
254 return 0;
255 }
256 }
257
258 /* Implementation of linux_target_ops method "insert_point".
259
260 This only records the information about the bp/wp to be inserted;
261 the actual insertion will happen when threads are resumed. */
262
263 static int
264 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
265 int len, struct raw_breakpoint *bp)
266 {
267 int ret;
268 enum target_hw_bp_type targ_type;
269 struct aarch64_debug_reg_state *state
270 = aarch64_get_debug_reg_state (pid_of (current_thread));
271
272 if (show_debug_regs)
273 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
274 (unsigned long) addr, len);
275
276 /* Determine the type from the raw breakpoint type. */
277 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
278
279 if (targ_type != hw_execute)
280 {
281 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
282 ret = aarch64_handle_watchpoint (targ_type, addr, len,
283 1 /* is_insert */, state);
284 else
285 ret = -1;
286 }
287 else
288 {
289 if (len == 3)
290 {
291 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
292 instruction. Set it to 2 to correctly encode the length bit
293 mask in the hardware breakpoint/watchpoint control register. */
294 len = 2;
295 }
296 ret = aarch64_handle_breakpoint (targ_type, addr, len,
297 1 /* is_insert */, state);
298 }
299
300 if (show_debug_regs)
301 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
302 targ_type);
303
304 return ret;
305 }
306
307 /* Implementation of linux_target_ops method "remove_point".
308
309 This only records the information about the bp/wp to be removed;
310 the actual removal will be done when threads are resumed. */
311
312 static int
313 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
314 int len, struct raw_breakpoint *bp)
315 {
316 int ret;
317 enum target_hw_bp_type targ_type;
318 struct aarch64_debug_reg_state *state
319 = aarch64_get_debug_reg_state (pid_of (current_thread));
320
321 if (show_debug_regs)
322 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
323 (unsigned long) addr, len);
324
325 /* Determine the type from the raw breakpoint type. */
326 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
327
328 /* Set up state pointers. */
329 if (targ_type != hw_execute)
330 ret =
331 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
332 state);
333 else
334 {
335 if (len == 3)
336 {
337 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
338 instruction. Set it to 2 to correctly encode the length bit
339 mask in the hardware breakpoint/watchpoint control register. */
340 len = 2;
341 }
342 ret = aarch64_handle_breakpoint (targ_type, addr, len,
343 0 /* is_insert */, state);
344 }
345
346 if (show_debug_regs)
347 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
348 targ_type);
349
350 return ret;
351 }
352
353 /* Implementation of linux_target_ops method "stopped_data_address". */
354
355 static CORE_ADDR
356 aarch64_stopped_data_address (void)
357 {
358 siginfo_t siginfo;
359 int pid, i;
360 struct aarch64_debug_reg_state *state;
361
362 pid = lwpid_of (current_thread);
363
364 /* Get the siginfo. */
365 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
366 return (CORE_ADDR) 0;
367
368 /* This needs to be a hardware breakpoint/watchpoint trap. */
369 if (siginfo.si_signo != SIGTRAP
370 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
371 return (CORE_ADDR) 0;
372
373 /* Check if the address matches any watched address. */
374 state = aarch64_get_debug_reg_state (pid_of (current_thread));
375 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
376 {
377 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
378 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
379 const CORE_ADDR addr_watch = state->dr_addr_wp[i];
380 if (state->dr_ref_count_wp[i]
381 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
382 && addr_trap >= addr_watch
383 && addr_trap < addr_watch + len)
384 return addr_trap;
385 }
386
387 return (CORE_ADDR) 0;
388 }
389
390 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
391
392 static int
393 aarch64_stopped_by_watchpoint (void)
394 {
395 if (aarch64_stopped_data_address () != 0)
396 return 1;
397 else
398 return 0;
399 }
400
401 /* Fetch the thread-local storage pointer for libthread_db. */
402
403 ps_err_e
404 ps_get_thread_area (struct ps_prochandle *ph,
405 lwpid_t lwpid, int idx, void **base)
406 {
407 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
408 is_64bit_tdesc ());
409 }
410
411 /* Implementation of linux_target_ops method "siginfo_fixup". */
412
413 static int
414 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
415 {
416 /* Is the inferior 32-bit? If so, then fix up the siginfo object. */
417 if (!is_64bit_tdesc ())
418 {
419 if (direction == 0)
420 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
421 native);
422 else
423 aarch64_siginfo_from_compat_siginfo (native,
424 (struct compat_siginfo *) inf);
425
426 return 1;
427 }
428
429 return 0;
430 }
431
432 /* Implementation of linux_target_ops method "new_process". */
433
434 static struct arch_process_info *
435 aarch64_linux_new_process (void)
436 {
437 struct arch_process_info *info = XCNEW (struct arch_process_info);
438
439 aarch64_init_debug_reg_state (&info->debug_reg_state);
440
441 return info;
442 }
443
444 /* Implementation of linux_target_ops method "delete_process". */
445
446 static void
447 aarch64_linux_delete_process (struct arch_process_info *info)
448 {
449 xfree (info);
450 }
451
452 /* Implementation of linux_target_ops method "linux_new_fork". */
453
454 static void
455 aarch64_linux_new_fork (struct process_info *parent,
456 struct process_info *child)
457 {
458 /* These are allocated by linux_add_process. */
459 gdb_assert (parent->priv != NULL
460 && parent->priv->arch_private != NULL);
461 gdb_assert (child->priv != NULL
462 && child->priv->arch_private != NULL);
463
464 /* Linux kernels before 2.6.33 (commit
465 72f674d203cd230426437cdcf7dd6f681dad8b0d)
466 make the child inherit hardware debug registers from the parent
467 on fork/vfork/clone. Newer Linux kernels create such tasks with
468 zeroed debug registers.
469
470 The GDB core assumes the child inherits the watchpoints/hw
471 breakpoints of the parent, and will remove them all from the
472 forked-off process. Copy the debug register mirrors into the
473 new process so that all breakpoints and watchpoints can be
474 removed together. The debug register mirrors will be zeroed
475 before detaching the forked-off process, thus making this
476 compatible with older Linux kernels too. */
477
478 *child->priv->arch_private = *parent->priv->arch_private;
479 }
480
481 /* Return the right target description according to the ELF file of
482 the current thread. */
483
484 static const struct target_desc *
485 aarch64_linux_read_description (void)
486 {
487 unsigned int machine;
488 int is_elf64;
489 int tid;
490
491 tid = lwpid_of (current_thread);
492
493 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
494
495 if (is_elf64)
496 return tdesc_aarch64;
497 else
498 return tdesc_arm_with_neon;
499 }
500
501 /* Implementation of linux_target_ops method "arch_setup". */
502
503 static void
504 aarch64_arch_setup (void)
505 {
506 current_process ()->tdesc = aarch64_linux_read_description ();
507
508 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
509 }
510
511 static struct regset_info aarch64_regsets[] =
512 {
513 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
514 sizeof (struct user_pt_regs), GENERAL_REGS,
515 aarch64_fill_gregset, aarch64_store_gregset },
516 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
517 sizeof (struct user_fpsimd_state), FP_REGS,
518 aarch64_fill_fpregset, aarch64_store_fpregset
519 },
520 NULL_REGSET
521 };
522
523 static struct regsets_info aarch64_regsets_info =
524 {
525 aarch64_regsets, /* regsets */
526 0, /* num_regsets */
527 NULL, /* disabled_regsets */
528 };
529
530 static struct regs_info regs_info_aarch64 =
531 {
532 NULL, /* regset_bitmap */
533 NULL, /* usrregs */
534 &aarch64_regsets_info,
535 };
536
537 /* Implementation of linux_target_ops method "regs_info". */
538
539 static const struct regs_info *
540 aarch64_regs_info (void)
541 {
542 if (is_64bit_tdesc ())
543 return &regs_info_aarch64;
544 else
545 return &regs_info_aarch32;
546 }
547
548 /* Implementation of linux_target_ops method "supports_tracepoints". */
549
550 static int
551 aarch64_supports_tracepoints (void)
552 {
553 if (current_thread == NULL)
554 return 1;
555 else
556 {
557 /* We don't support tracepoints on aarch32 for now. */
558 return is_64bit_tdesc ();
559 }
560 }
561
562 /* Implementation of linux_target_ops method "get_thread_area". */
563
564 static int
565 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
566 {
567 struct iovec iovec;
568 uint64_t reg;
569
570 iovec.iov_base = &reg;
571 iovec.iov_len = sizeof (reg);
572
573 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
574 return -1;
575
576 *addrp = reg;
577
578 return 0;
579 }
580
581 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
582
583 static void
584 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
585 {
586 int use_64bit = register_size (regcache->tdesc, 0) == 8;
587
588 if (use_64bit)
589 {
590 long l_sysno;
591
592 collect_register_by_name (regcache, "x8", &l_sysno);
593 *sysno = (int) l_sysno;
594 }
595 else
596 collect_register_by_name (regcache, "r7", sysno);
597 }
598
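/* (x8 is the syscall-number register in the AArch64 Linux ABI, and r7
   plays the same role for 32-bit ARM inferiors, hence the two register
   names read above.)  */
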
599 /* List of condition codes that we need. */
600
601 enum aarch64_condition_codes
602 {
603 EQ = 0x0,
604 NE = 0x1,
605 LO = 0x3,
606 GE = 0xa,
607 LT = 0xb,
608 GT = 0xc,
609 LE = 0xd,
610 };
611
612 enum aarch64_operand_type
613 {
614 OPERAND_IMMEDIATE,
615 OPERAND_REGISTER,
616 };
617
618 /* Representation of an operand. At this time, it only supports register
619 and immediate types. */
620
621 struct aarch64_operand
622 {
623 /* Type of the operand. */
624 enum aarch64_operand_type type;
625
626 /* Value of the operand according to the type. */
627 union
628 {
629 uint32_t imm;
630 struct aarch64_register reg;
631 };
632 };
633
634 /* List of registers that we are currently using; we can add more here
635 as we need them. */
636
637 /* General purpose scratch registers (64 bit). */
638 static const struct aarch64_register x0 = { 0, 1 };
639 static const struct aarch64_register x1 = { 1, 1 };
640 static const struct aarch64_register x2 = { 2, 1 };
641 static const struct aarch64_register x3 = { 3, 1 };
642 static const struct aarch64_register x4 = { 4, 1 };
643
644 /* General purpose scratch registers (32 bit). */
645 static const struct aarch64_register w0 = { 0, 0 };
646 static const struct aarch64_register w2 = { 2, 0 };
647
648 /* Intra-procedure scratch registers. */
649 static const struct aarch64_register ip0 = { 16, 1 };
650
651 /* Special purpose registers. */
652 static const struct aarch64_register fp = { 29, 1 };
653 static const struct aarch64_register lr = { 30, 1 };
654 static const struct aarch64_register sp = { 31, 1 };
655 static const struct aarch64_register xzr = { 31, 1 };
656
657 /* Construct a register descriptor at run time. If the register is
658 known statically, we should make it a global as above instead of
659 using this helper function. */
660
661 static struct aarch64_register
662 aarch64_register (unsigned num, int is64)
663 {
664 return (struct aarch64_register) { num, is64 };
665 }
666
667 /* Helper function to create a register operand, for instructions with
668 different types of operands.
669
670 For example:
671 p += emit_mov (p, x0, register_operand (x1)); */
672
673 static struct aarch64_operand
674 register_operand (struct aarch64_register reg)
675 {
676 struct aarch64_operand operand;
677
678 operand.type = OPERAND_REGISTER;
679 operand.reg = reg;
680
681 return operand;
682 }
683
684 /* Helper function to create an immediate operand, for instructions with
685 different types of operands.
686
687 For example:
688 p += emit_mov (p, x0, immediate_operand (12)); */
689
690 static struct aarch64_operand
691 immediate_operand (uint32_t imm)
692 {
693 struct aarch64_operand operand;
694
695 operand.type = OPERAND_IMMEDIATE;
696 operand.imm = imm;
697
698 return operand;
699 }
700
701 /* Helper function to create an offset memory operand.
702
703 For example:
704 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
705
706 static struct aarch64_memory_operand
707 offset_memory_operand (int32_t offset)
708 {
709 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
710 }
711
712 /* Helper function to create a pre-index memory operand.
713
714 For example:
715 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
716
717 static struct aarch64_memory_operand
718 preindex_memory_operand (int32_t index)
719 {
720 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
721 }
722
723 /* Helper function to create a post-index memory operand.
724
725 For example:
726 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
727
728 static struct aarch64_memory_operand
729 postindex_memory_operand (int32_t index)
730 {
731 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
732 }
733
734 /* System control registers. These special registers can be read with
735 the MRS instruction and written with the MSR instruction.
736
737 - NZCV: Condition flags. GDB refers to this register under the CPSR
738 name.
739 - FPSR: Floating-point status register.
740 - FPCR: Floating-point control register.
741 - TPIDR_EL0: Software thread ID register. */
742
743 enum aarch64_system_control_registers
744 {
745 /* op0 op1 crn crm op2 */
746 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
747 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
748 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
749 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
750 };
751
752 /* Write a BLR instruction into *BUF.
753
754 BLR rn
755
756 RN is the register to branch to. */
757
758 static int
759 emit_blr (uint32_t *buf, struct aarch64_register rn)
760 {
761 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
762 }
763
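/* A note on ENCODE, used by all the emit_* helpers here: it comes from
   arch/aarch64-insn.h and, assuming the definition there, it masks a
   value to SIZE bits and shifts it to bit position OFFSET:

     ENCODE (rn.num, 5, 5) == (((uint32_t) rn.num & 0x1f) << 5)

   so the BLR encoding above places the register number in bits [9:5]
   of the instruction word.  */
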
764 /* Write a RET instruction into *BUF.
765
766 RET xn
767
768 RN is the register to branch to. */
769
770 static int
771 emit_ret (uint32_t *buf, struct aarch64_register rn)
772 {
773 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
774 }
775
776 static int
777 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
778 struct aarch64_register rt,
779 struct aarch64_register rt2,
780 struct aarch64_register rn,
781 struct aarch64_memory_operand operand)
782 {
783 uint32_t opc;
784 uint32_t pre_index;
785 uint32_t write_back;
786
787 if (rt.is64)
788 opc = ENCODE (2, 2, 30);
789 else
790 opc = ENCODE (0, 2, 30);
791
792 switch (operand.type)
793 {
794 case MEMORY_OPERAND_OFFSET:
795 {
796 pre_index = ENCODE (1, 1, 24);
797 write_back = ENCODE (0, 1, 23);
798 break;
799 }
800 case MEMORY_OPERAND_POSTINDEX:
801 {
802 pre_index = ENCODE (0, 1, 24);
803 write_back = ENCODE (1, 1, 23);
804 break;
805 }
806 case MEMORY_OPERAND_PREINDEX:
807 {
808 pre_index = ENCODE (1, 1, 24);
809 write_back = ENCODE (1, 1, 23);
810 break;
811 }
812 default:
813 return 0;
814 }
815
816 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
817 | ENCODE (operand.index >> 3, 7, 15)
818 | ENCODE (rt2.num, 5, 10)
819 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
820 }
821
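/* Note that ENCODE (operand.index >> 3, 7, 15) above stores the offset
   scaled down by 8 in a 7-bit signed immediate field, which is what
   limits the STP/LDP offsets below to the -512 .. 504 range.  */
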
822 /* Write a STP instruction into *BUF.
823
824 STP rt, rt2, [rn, #offset]
825 STP rt, rt2, [rn, #index]!
826 STP rt, rt2, [rn], #index
827
828 RT and RT2 are the registers to store.
829 RN is the base address register.
830 OFFSET is the immediate to add to the base address. It is limited to a
831 -512 .. 504 range (7 bits << 3). */
832
833 static int
834 emit_stp (uint32_t *buf, struct aarch64_register rt,
835 struct aarch64_register rt2, struct aarch64_register rn,
836 struct aarch64_memory_operand operand)
837 {
838 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
839 }
840
841 /* Write a LDP instruction into *BUF.
842
843 LDP rt, rt2, [rn, #offset]
844 LDP rt, rt2, [rn, #index]!
845 LDP rt, rt2, [rn], #index
846
847 RT and RT2 are the registers to load into.
848 RN is the base address register.
849 OFFSET is the immediate to add to the base address. It is limited to a
850 -512 .. 504 range (7 bits << 3). */
851
852 static int
853 emit_ldp (uint32_t *buf, struct aarch64_register rt,
854 struct aarch64_register rt2, struct aarch64_register rn,
855 struct aarch64_memory_operand operand)
856 {
857 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
858 }
859
860 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
861
862 LDP qt, qt2, [rn, #offset]
863
864 RT and RT2 are the Q registers to load into.
865 RN is the base address register.
866 OFFSET is the immediate to add to the base address. It is limited to
867 -1024 .. 1008 range (7 bits << 4). */
868
869 static int
870 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
871 struct aarch64_register rn, int32_t offset)
872 {
873 uint32_t opc = ENCODE (2, 2, 30);
874 uint32_t pre_index = ENCODE (1, 1, 24);
875
876 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
877 | ENCODE (offset >> 4, 7, 15)
878 | ENCODE (rt2, 5, 10)
879 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
880 }
881
882 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
883
884 STP qt, qt2, [rn, #offset]
885
886 RT and RT2 are the Q registers to store.
887 RN is the base address register.
888 OFFSET is the immediate to add to the base address. It is limited to
889 -1024 .. 1008 range (7 bits << 4). */
890
891 static int
892 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
893 struct aarch64_register rn, int32_t offset)
894 {
895 uint32_t opc = ENCODE (2, 2, 30);
896 uint32_t pre_index = ENCODE (1, 1, 24);
897
898 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
899 | ENCODE (offset >> 4, 7, 15)
900 | ENCODE (rt2, 5, 10)
901 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
902 }
903
904 /* Write a LDRH instruction into *BUF.
905
906 LDRH wt, [xn, #offset]
907 LDRH wt, [xn, #index]!
908 LDRH wt, [xn], #index
909
910 RT is the register to load into.
911 RN is the base address register.
912 OFFSET is the immediate to add to the base address. It is limited to
913 a 0 .. 8190 range (12 bits << 1). */
914
915 static int
916 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
917 struct aarch64_register rn,
918 struct aarch64_memory_operand operand)
919 {
920 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
921 }
922
923 /* Write a LDRB instruction into *BUF.
924
925 LDRB wt, [xn, #offset]
926 LDRB wt, [xn, #index]!
927 LDRB wt, [xn], #index
928
929 RT is the register to load into.
930 RN is the base address register.
931 OFFSET is the immediate to add to the base address. It is limited to
932 a 0 .. 4095 range (12 bits << 0). */
933
934 static int
935 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
936 struct aarch64_register rn,
937 struct aarch64_memory_operand operand)
938 {
939 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
940 }
941
942
943
944 /* Write a STR instruction into *BUF.
945
946 STR rt, [rn, #offset]
947 STR rt, [rn, #index]!
948 STR rt, [rn], #index
949
950 RT is the register to store.
951 RN is the base address register.
952 OFFSET is the immediate to add to the base address. It is limited to
953 0 .. 32760 range (12 bits << 3). */
954
955 static int
956 emit_str (uint32_t *buf, struct aarch64_register rt,
957 struct aarch64_register rn,
958 struct aarch64_memory_operand operand)
959 {
960 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
961 }
962
963 /* Helper function emitting an exclusive load or store instruction. */
964
965 static int
966 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
967 enum aarch64_opcodes opcode,
968 struct aarch64_register rs,
969 struct aarch64_register rt,
970 struct aarch64_register rt2,
971 struct aarch64_register rn)
972 {
973 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
974 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
975 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
976 }
977
978 /* Write a LDAXR instruction into *BUF.
979
980 LDAXR rt, [xn]
981
982 RT is the destination register.
983 RN is the base address register. */
984
985 static int
986 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
987 struct aarch64_register rn)
988 {
989 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
990 xzr, rn);
991 }
992
993 /* Write a STXR instruction into *BUF.
994
995 STXR ws, rt, [xn]
996
997 RS is the status register; it receives 0 if the store succeeded
998 and a non-zero value otherwise. RT is the register to store.
999 RN is the base address register. */
1000
1001 static int
1002 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1003 struct aarch64_register rt, struct aarch64_register rn)
1004 {
1005 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1006 xzr, rn);
1007 }
1008
1009 /* Write a STLR instruction into *BUF.
1010
1011 STLR rt, [xn]
1012
1013 RT is the register to store.
1014 RN is the base address register. */
1015
1016 static int
1017 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1018 struct aarch64_register rn)
1019 {
1020 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1021 xzr, rn);
1022 }
1023
1024 /* Helper function for data processing instructions with register sources. */
1025
1026 static int
1027 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1028 struct aarch64_register rd,
1029 struct aarch64_register rn,
1030 struct aarch64_register rm)
1031 {
1032 uint32_t size = ENCODE (rd.is64, 1, 31);
1033
1034 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1035 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1036 }
1037
1038 /* Helper function for data processing instructions taking either a register
1039 or an immediate. */
1040
1041 static int
1042 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1043 struct aarch64_register rd,
1044 struct aarch64_register rn,
1045 struct aarch64_operand operand)
1046 {
1047 uint32_t size = ENCODE (rd.is64, 1, 31);
1048 /* The opcode is different for register and immediate source operands. */
1049 uint32_t operand_opcode;
1050
1051 if (operand.type == OPERAND_IMMEDIATE)
1052 {
1053 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1054 operand_opcode = ENCODE (8, 4, 25);
1055
1056 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1057 | ENCODE (operand.imm, 12, 10)
1058 | ENCODE (rn.num, 5, 5)
1059 | ENCODE (rd.num, 5, 0));
1060 }
1061 else
1062 {
1063 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1064 operand_opcode = ENCODE (5, 4, 25);
1065
1066 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1067 rn, operand.reg);
1068 }
1069 }
1070
1071 /* Write an ADD instruction into *BUF.
1072
1073 ADD rd, rn, #imm
1074 ADD rd, rn, rm
1075
1076 This function handles both an immediate and register add.
1077
1078 RD is the destination register.
1079 RN is the input register.
1080 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1081 OPERAND_REGISTER. */
1082
1083 static int
1084 emit_add (uint32_t *buf, struct aarch64_register rd,
1085 struct aarch64_register rn, struct aarch64_operand operand)
1086 {
1087 return emit_data_processing (buf, ADD, rd, rn, operand);
1088 }
1089
1090 /* Write a SUB instruction into *BUF.
1091
1092 SUB rd, rn, #imm
1093 SUB rd, rn, rm
1094
1095 This function handles both an immediate and register sub.
1096
1097 RD is the destination register.
1098 RN is the input register.
1099 OPERAND is the immediate or register to subtract from RN. */
1100
1101 static int
1102 emit_sub (uint32_t *buf, struct aarch64_register rd,
1103 struct aarch64_register rn, struct aarch64_operand operand)
1104 {
1105 return emit_data_processing (buf, SUB, rd, rn, operand);
1106 }
1107
1108 /* Write a MOV instruction into *BUF.
1109
1110 MOV rd, #imm
1111 MOV rd, rm
1112
1113 This function handles both a wide immediate move and a register move,
1114 with the condition that the source register is not xzr. xzr and the
1115 stack pointer share the same encoding and this function only supports
1116 the stack pointer.
1117
1118 RD is the destination register.
1119 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1120 OPERAND_REGISTER. */
1121
1122 static int
1123 emit_mov (uint32_t *buf, struct aarch64_register rd,
1124 struct aarch64_operand operand)
1125 {
1126 if (operand.type == OPERAND_IMMEDIATE)
1127 {
1128 uint32_t size = ENCODE (rd.is64, 1, 31);
1129 /* Do not shift the immediate. */
1130 uint32_t shift = ENCODE (0, 2, 21);
1131
1132 return aarch64_emit_insn (buf, MOV | size | shift
1133 | ENCODE (operand.imm, 16, 5)
1134 | ENCODE (rd.num, 5, 0));
1135 }
1136 else
1137 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1138 }
1139
1140 /* Write a MOVK instruction into *BUF.
1141
1142 MOVK rd, #imm, lsl #shift
1143
1144 RD is the destination register.
1145 IMM is the immediate.
1146 SHIFT is the logical shift left to apply to IMM. */
1147
1148 static int
1149 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1150 unsigned shift)
1151 {
1152 uint32_t size = ENCODE (rd.is64, 1, 31);
1153
1154 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1155 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1156 }
1157
1158 /* Write instructions into *BUF in order to move ADDR into a register.
1159 ADDR can be a 64-bit value.
1160
1161 This function will emit a series of MOV and MOVK instructions, such as:
1162
1163 MOV xd, #(addr)
1164 MOVK xd, #(addr >> 16), lsl #16
1165 MOVK xd, #(addr >> 32), lsl #32
1166 MOVK xd, #(addr >> 48), lsl #48 */
1167
1168 static int
1169 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1170 {
1171 uint32_t *p = buf;
1172
1173 /* The MOV (wide immediate) instruction clears the top bits of the
1174 register. */
1175 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1176
1177 if ((addr >> 16) != 0)
1178 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1179 else
1180 return p - buf;
1181
1182 if ((addr >> 32) != 0)
1183 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1184 else
1185 return p - buf;
1186
1187 if ((addr >> 48) != 0)
1188 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1189
1190 return p - buf;
1191 }
1192
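/* For example, with the (hypothetical) address 0x0000007f12345678,
   emit_mov_addr would emit:

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16
     MOVK xd, #0x7f, lsl #32

   and stop there, since bits [63:48] of the address are zero.  */
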
1193 /* Write a SUBS instruction into *BUF.
1194
1195 SUBS rd, rn, rm
1196
1197 This instruction updates the condition flags.
1198
1199 RD is the destination register.
1200 RN and RM are the source registers. */
1201
1202 static int
1203 emit_subs (uint32_t *buf, struct aarch64_register rd,
1204 struct aarch64_register rn, struct aarch64_operand operand)
1205 {
1206 return emit_data_processing (buf, SUBS, rd, rn, operand);
1207 }
1208
1209 /* Write a CMP instruction into *BUF.
1210
1211 CMP rn, rm
1212
1213 This instruction is an alias of SUBS xzr, rn, rm.
1214
1215 RN and RM are the registers to compare. */
1216
1217 static int
1218 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1219 struct aarch64_operand operand)
1220 {
1221 return emit_subs (buf, xzr, rn, operand);
1222 }
1223
1224 /* Write a AND instruction into *BUF.
1225
1226 AND rd, rn, rm
1227
1228 RD is the destination register.
1229 RN and RM are the source registers. */
1230
1231 static int
1232 emit_and (uint32_t *buf, struct aarch64_register rd,
1233 struct aarch64_register rn, struct aarch64_register rm)
1234 {
1235 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1236 }
1237
1238 /* Write a ORR instruction into *BUF.
1239
1240 ORR rd, rn, rm
1241
1242 RD is the destination register.
1243 RN and RM are the source registers. */
1244
1245 static int
1246 emit_orr (uint32_t *buf, struct aarch64_register rd,
1247 struct aarch64_register rn, struct aarch64_register rm)
1248 {
1249 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1250 }
1251
1252 /* Write a ORN instruction into *BUF.
1253
1254 ORN rd, rn, rm
1255
1256 RD is the destination register.
1257 RN and RM are the source registers. */
1258
1259 static int
1260 emit_orn (uint32_t *buf, struct aarch64_register rd,
1261 struct aarch64_register rn, struct aarch64_register rm)
1262 {
1263 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1264 }
1265
1266 /* Write a EOR instruction into *BUF.
1267
1268 EOR rd, rn, rm
1269
1270 RD is the destination register.
1271 RN and RM are the source registers. */
1272
1273 static int
1274 emit_eor (uint32_t *buf, struct aarch64_register rd,
1275 struct aarch64_register rn, struct aarch64_register rm)
1276 {
1277 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1278 }
1279
1280 /* Write a MVN instruction into *BUF.
1281
1282 MVN rd, rm
1283
1284 This is an alias for ORN rd, xzr, rm.
1285
1286 RD is the destination register.
1287 RM is the source register. */
1288
1289 static int
1290 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1291 struct aarch64_register rm)
1292 {
1293 return emit_orn (buf, rd, xzr, rm);
1294 }
1295
1296 /* Write a LSLV instruction into *BUF.
1297
1298 LSLV rd, rn, rm
1299
1300 RD is the destination register.
1301 RN and RM are the source registers. */
1302
1303 static int
1304 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1305 struct aarch64_register rn, struct aarch64_register rm)
1306 {
1307 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1308 }
1309
1310 /* Write a LSRV instruction into *BUF.
1311
1312 LSRV rd, rn, rm
1313
1314 RD is the destination register.
1315 RN and RM are the source registers. */
1316
1317 static int
1318 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1319 struct aarch64_register rn, struct aarch64_register rm)
1320 {
1321 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1322 }
1323
1324 /* Write a ASRV instruction into *BUF.
1325
1326 ASRV rd, rn, rm
1327
1328 RD is the destination register.
1329 RN and RM are the source registers. */
1330
1331 static int
1332 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1333 struct aarch64_register rn, struct aarch64_register rm)
1334 {
1335 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1336 }
1337
1338 /* Write a MUL instruction into *BUF.
1339
1340 MUL rd, rn, rm
1341
1342 RD is the destination register.
1343 RN and RM are the source registers. */
1344
1345 static int
1346 emit_mul (uint32_t *buf, struct aarch64_register rd,
1347 struct aarch64_register rn, struct aarch64_register rm)
1348 {
1349 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1350 }
1351
1352 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1353
1354 MRS xt, system_reg
1355
1356 RT is the destination register.
1357 SYSTEM_REG is the special-purpose register to read. */
1358
1359 static int
1360 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1361 enum aarch64_system_control_registers system_reg)
1362 {
1363 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1364 | ENCODE (rt.num, 5, 0));
1365 }
1366
1367 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1368
1369 MSR system_reg, xt
1370
1371 SYSTEM_REG is the special-purpose register to write.
1372 RT is the input register. */
1373
1374 static int
1375 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1376 struct aarch64_register rt)
1377 {
1378 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1379 | ENCODE (rt.num, 5, 0));
1380 }
1381
1382 /* Write a SEVL instruction into *BUF.
1383
1384 This is a hint instruction telling the hardware to trigger an event. */
1385
1386 static int
1387 emit_sevl (uint32_t *buf)
1388 {
1389 return aarch64_emit_insn (buf, SEVL);
1390 }
1391
1392 /* Write a WFE instruction into *BUF.
1393
1394 This is a hint instruction telling the hardware to wait for an event. */
1395
1396 static int
1397 emit_wfe (uint32_t *buf)
1398 {
1399 return aarch64_emit_insn (buf, WFE);
1400 }
1401
1402 /* Write a SBFM instruction into *BUF.
1403
1404 SBFM rd, rn, #immr, #imms
1405
1406 This instruction moves the bits from #immr to #imms into the
1407 destination, sign extending the result.
1408
1409 RD is the destination register.
1410 RN is the source register.
1411 IMMR is the bit number to start at (least significant bit).
1412 IMMS is the bit number to stop at (most significant bit). */
1413
1414 static int
1415 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1416 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1417 {
1418 uint32_t size = ENCODE (rd.is64, 1, 31);
1419 uint32_t n = ENCODE (rd.is64, 1, 22);
1420
1421 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1422 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1423 | ENCODE (rd.num, 5, 0));
1424 }
1425
1426 /* Write a SBFX instruction into *BUF.
1427
1428 SBFX rd, rn, #lsb, #width
1429
1430 This instruction moves #width bits from #lsb into the destination, sign
1431 extending the result. This is an alias for:
1432
1433 SBFM rd, rn, #lsb, #(lsb + width - 1)
1434
1435 RD is the destination register.
1436 RN is the source register.
1437 LSB is the bit number to start at (least significant bit).
1438 WIDTH is the number of bits to move. */
1439
1440 static int
1441 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1442 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1443 {
1444 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1445 }
1446
1447 /* Write a UBFM instruction into *BUF.
1448
1449 UBFM rd, rn, #immr, #imms
1450
1451 This instruction moves the bits from #immr to #imms into the
1452 destination, extending the result with zeros.
1453
1454 RD is the destination register.
1455 RN is the source register.
1456 IMMR is the bit number to start at (least significant bit).
1457 IMMS is the bit number to stop at (most significant bit). */
1458
1459 static int
1460 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1461 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1462 {
1463 uint32_t size = ENCODE (rd.is64, 1, 31);
1464 uint32_t n = ENCODE (rd.is64, 1, 22);
1465
1466 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1467 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1468 | ENCODE (rd.num, 5, 0));
1469 }
1470
1471 /* Write a UBFX instruction into *BUF.
1472
1473 UBFX rd, rn, #lsb, #width
1474
1475 This instruction moves #width bits from #lsb into the destination,
1476 extending the result with zeros. This is an alias for:
1477
1478 UBFM rd, rn, #lsb, #(lsb + width - 1)
1479
1480 RD is the destination register.
1481 RN is the source register.
1482 LSB is the bit number to start at (least significant bit).
1483 WIDTH is the number of bits to move. */
1484
1485 static int
1486 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1487 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1488 {
1489 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1490 }
1491
1492 /* Write a CSINC instruction into *BUF.
1493
1494 CSINC rd, rn, rm, cond
1495
1496 This instruction writes rn to rd if the condition is true, and rm
1497 incremented by one otherwise.
1498
1499 RD is the destination register.
1500 RN and RM are the source registers.
1501 COND is the encoded condition. */
1502
1503 static int
1504 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1505 struct aarch64_register rn, struct aarch64_register rm,
1506 unsigned cond)
1507 {
1508 uint32_t size = ENCODE (rd.is64, 1, 31);
1509
1510 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1511 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1512 | ENCODE (rd.num, 5, 0));
1513 }
1514
1515 /* Write a CSET instruction into *BUF.
1516
1517 CSET rd, cond
1518
1519 This instruction writes 1 to the destination register if the
1520 condition is true, and 0 otherwise. This is an alias for:
1521
1522 CSINC rd, xzr, xzr, !cond
1523
1524 Note that the condition needs to be inverted.
1525
1526 RD is the destination register.
1527 COND is the encoded condition. */
1529
1530 static int
1531 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1532 {
1533 /* The least significant bit of the condition needs toggling in order to
1534 invert it. */
1535 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1536 }
1537
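/* For example, emit_cset (p, x0, EQ) encodes CSINC x0, xzr, xzr, ne,
   which sets x0 to 1 when the Z flag is set (EQ holds) and to 0
   otherwise.  */
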
1538 /* Write LEN instructions from BUF into the inferior memory at *TO,
1539 advancing *TO past the written bytes. Note that instructions are
1540 always little-endian on AArch64, unlike data. */
1541
1542 static void
1543 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1544 {
1545 size_t byte_len = len * sizeof (uint32_t);
1546 #if (__BYTE_ORDER == __BIG_ENDIAN)
1547 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1548 size_t i;
1549
1550 for (i = 0; i < len; i++)
1551 le_buf[i] = htole32 (buf[i]);
1552
1553 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1554
1555 xfree (le_buf);
1556 #else
1557 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1558 #endif
1559
1560 *to += byte_len;
1561 }
1562
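/* A minimal usage sketch of the emitters above (hypothetical buffer
   size and scratch address), mirroring the pattern used by the jump
   pad builder below:

     uint32_t buf[16];
     uint32_t *p = buf;
     CORE_ADDR to = scratch_addr;

     p += emit_mov (p, x0, immediate_operand (1));
     p += emit_ret (p, lr);
     append_insns (&to, p - buf, buf);

   Each emit_* helper returns the number of 32-bit instruction slots
   it wrote.  */
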
1563 /* Sub-class of struct aarch64_insn_data, storing information for
1564 instruction relocation for fast tracepoints. The visitor can
1565 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and saves
1566 the relocated instructions in the buffer pointed to by INSN_PTR. */
1567
1568 struct aarch64_insn_relocation_data
1569 {
1570 struct aarch64_insn_data base;
1571
1572 /* The new address the instruction is relocated to. */
1573 CORE_ADDR new_addr;
1574 /* Pointer to the buffer of relocated instruction(s). */
1575 uint32_t *insn_ptr;
1576 };
1577
1578 /* Implementation of aarch64_insn_visitor method "b". */
1579
1580 static void
1581 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1582 struct aarch64_insn_data *data)
1583 {
1584 struct aarch64_insn_relocation_data *insn_reloc
1585 = (struct aarch64_insn_relocation_data *) data;
1586 int64_t new_offset
1587 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1588
1589 if (can_encode_int32 (new_offset, 28))
1590 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1591 }
1592
1593 /* Implementation of aarch64_insn_visitor method "b_cond". */
1594
1595 static void
1596 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1597 struct aarch64_insn_data *data)
1598 {
1599 struct aarch64_insn_relocation_data *insn_reloc
1600 = (struct aarch64_insn_relocation_data *) data;
1601 int64_t new_offset
1602 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1603
1604 if (can_encode_int32 (new_offset, 21))
1605 {
1606 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1607 new_offset);
1608 }
1609 else if (can_encode_int32 (new_offset, 28))
1610 {
1611 /* The offset is out of range for a conditional branch
1612 instruction but not for an unconditional branch. We can use
1613 the following instructions instead:
1614
1615 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1616 B NOT_TAKEN ; Else jump over TAKEN and continue.
1617 TAKEN:
1618 B #(offset - 8)
1619 NOT_TAKEN:
1620
1621 */
1622
1623 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1624 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1625 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1626 }
1627 }
1628
1629 /* Implementation of aarch64_insn_visitor method "cb". */
1630
1631 static void
1632 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1633 const unsigned rn, int is64,
1634 struct aarch64_insn_data *data)
1635 {
1636 struct aarch64_insn_relocation_data *insn_reloc
1637 = (struct aarch64_insn_relocation_data *) data;
1638 int64_t new_offset
1639 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1640
1641 if (can_encode_int32 (new_offset, 21))
1642 {
1643 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1644 aarch64_register (rn, is64), new_offset);
1645 }
1646 else if (can_encode_int32 (new_offset, 28))
1647 {
1648 /* The offset is out of range for a compare and branch
1649 instruction but not for an unconditional branch. We can use
1650 the following instructions instead:
1651
1652 CBZ xn, TAKEN ; if xn == 0, then jump to TAKEN.
1653 B NOT_TAKEN ; Else jump over TAKEN and continue.
1654 TAKEN:
1655 B #(offset - 8)
1656 NOT_TAKEN:
1657
1658 */
1659 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1660 aarch64_register (rn, is64), 8);
1661 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1662 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1663 }
1664 }
1665
1666 /* Implementation of aarch64_insn_visitor method "tb". */
1667
1668 static void
1669 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1670 const unsigned rt, unsigned bit,
1671 struct aarch64_insn_data *data)
1672 {
1673 struct aarch64_insn_relocation_data *insn_reloc
1674 = (struct aarch64_insn_relocation_data *) data;
1675 int64_t new_offset
1676 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1677
1678 if (can_encode_int32 (new_offset, 16))
1679 {
1680 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1681 aarch64_register (rt, 1), new_offset);
1682 }
1683 else if (can_encode_int32 (new_offset, 28))
1684 {
1685 /* The offset is out of range for a test bit and branch
1686 instruction but not for an unconditional branch. We can use
1687 the following instructions instead:
1688
1689 TBZ xn, #bit, TAKEN ; if xn[bit] == 0, then jump to TAKEN.
1690 B NOT_TAKEN ; Else jump over TAKEN and continue.
1691 TAKEN:
1692 B #(offset - 8)
1693 NOT_TAKEN:
1694
1695 */
1696 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1697 aarch64_register (rt, 1), 8);
1698 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1699 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1700 new_offset - 8);
1701 }
1702 }
1703
1704 /* Implementation of aarch64_insn_visitor method "adr". */
1705
1706 static void
1707 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1708 const int is_adrp,
1709 struct aarch64_insn_data *data)
1710 {
1711 struct aarch64_insn_relocation_data *insn_reloc
1712 = (struct aarch64_insn_relocation_data *) data;
1713 /* We know exactly the address the ADR{P,} instruction will compute.
1714 We can just write it to the destination register. */
1715 CORE_ADDR address = data->insn_addr + offset;
1716
1717 if (is_adrp)
1718 {
1719 /* Clear the lower 12 bits of the offset to get the 4K page. */
1720 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1721 aarch64_register (rd, 1),
1722 address & ~0xfff);
1723 }
1724 else
1725 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1726 aarch64_register (rd, 1), address);
1727 }
1728
1729 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1730
1731 static void
1732 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1733 const unsigned rt, const int is64,
1734 struct aarch64_insn_data *data)
1735 {
1736 struct aarch64_insn_relocation_data *insn_reloc
1737 = (struct aarch64_insn_relocation_data *) data;
1738 CORE_ADDR address = data->insn_addr + offset;
1739
1740 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1741 aarch64_register (rt, 1), address);
1742
1743 /* We know exactly what address to load from, and what register we
1744 can use:
1745
1746 MOV xd, #(oldloc + offset)
1747 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1748 ...
1749
1750 LDR xd, [xd] ; or LDRSW xd, [xd]
1751
1752 */
1753
1754 if (is_sw)
1755 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1756 aarch64_register (rt, 1),
1757 aarch64_register (rt, 1),
1758 offset_memory_operand (0));
1759 else
1760 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1761 aarch64_register (rt, is64),
1762 aarch64_register (rt, 1),
1763 offset_memory_operand (0));
1764 }
1765
1766 /* Implementation of aarch64_insn_visitor method "others". */
1767
1768 static void
1769 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1770 struct aarch64_insn_data *data)
1771 {
1772 struct aarch64_insn_relocation_data *insn_reloc
1773 = (struct aarch64_insn_relocation_data *) data;
1774
1775 /* The instruction is not PC relative. Just re-emit it at the new
1776 location. */
1777 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1778 }
1779
1780 static const struct aarch64_insn_visitor visitor =
1781 {
1782 aarch64_ftrace_insn_reloc_b,
1783 aarch64_ftrace_insn_reloc_b_cond,
1784 aarch64_ftrace_insn_reloc_cb,
1785 aarch64_ftrace_insn_reloc_tb,
1786 aarch64_ftrace_insn_reloc_adr,
1787 aarch64_ftrace_insn_reloc_ldr_literal,
1788 aarch64_ftrace_insn_reloc_others,
1789 };
1790
1791 /* Implementation of linux_target_ops method
1792 "install_fast_tracepoint_jump_pad". */
1793
1794 static int
1795 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1796 CORE_ADDR tpaddr,
1797 CORE_ADDR collector,
1798 CORE_ADDR lockaddr,
1799 ULONGEST orig_size,
1800 CORE_ADDR *jump_entry,
1801 CORE_ADDR *trampoline,
1802 ULONGEST *trampoline_size,
1803 unsigned char *jjump_pad_insn,
1804 ULONGEST *jjump_pad_insn_size,
1805 CORE_ADDR *adjusted_insn_addr,
1806 CORE_ADDR *adjusted_insn_addr_end,
1807 char *err)
1808 {
1809 uint32_t buf[256];
1810 uint32_t *p = buf;
1811 int64_t offset;
1812 int i;
1813 uint32_t insn;
1814 CORE_ADDR buildaddr = *jump_entry;
1815 struct aarch64_insn_relocation_data insn_data;
1816
1817 /* We need to save the current state on the stack both to restore it
1818 later and to collect register values when the tracepoint is hit.
1819
1820 The saved registers are pushed in a layout that needs to be in sync
1821 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1822 the supply_fast_tracepoint_registers function will fill in the
1823 register cache from a pointer to saved registers on the stack we build
1824 here.
1825
1826 For simplicity, we set the size of each cell on the stack to 16 bytes.
1827 This way one cell can hold any register type, from system registers
1828 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
1829 has to be 16-byte aligned anyway.
1830
1831 Note that the CPSR register does not exist on AArch64. Instead we
1832 can access system bits describing the process state with the
1833 MRS/MSR instructions, namely the condition flags. We save them as
1834 if they are part of a CPSR register because that's how GDB
1835 interprets these system bits. At the moment, only the condition
1836 flags are saved in CPSR (NZCV).
1837
1838 Stack layout, each cell is 16 bytes (descending):
1839
1840 High *-------- SIMD&FP registers from 31 down to 0. --------*
1841 | q31 |
1842 . .
1843 . . 32 cells
1844 . .
1845 | q0 |
1846 *---- General purpose registers from 30 down to 0. ----*
1847 | x30 |
1848 . .
1849 . . 31 cells
1850 . .
1851 | x0 |
1852 *------------- Special purpose registers. -------------*
1853 | SP |
1854 | PC |
1855 | CPSR (NZCV) | 5 cells
1856 | FPSR |
1857 | FPCR | <- SP + 16
1858 *------------- collecting_t object --------------------*
1859 | TPIDR_EL0 | struct tracepoint * |
1860 Low *------------------------------------------------------*
1861
1862 After this stack is set up, we issue a call to the collector, passing
1863 it the saved registers at (SP + 16). */
1864
1865 /* Push SIMD&FP registers on the stack:
1866
1867 SUB sp, sp, #(32 * 16)
1868
1869 STP q30, q31, [sp, #(30 * 16)]
1870 ...
1871 STP q0, q1, [sp]
1872
1873 */
1874 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1875 for (i = 30; i >= 0; i -= 2)
1876 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1877
1878 /* Push general purpose registers on the stack. Note that we do not need
1879 to push x31 as it represents the xzr register and not the stack
1880 pointer in a STR instruction.
1881
1882 SUB sp, sp, #(31 * 16)
1883
1884 STR x30, [sp, #(30 * 16)]
1885 ...
1886 STR x0, [sp]
1887
1888 */
1889 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1890 for (i = 30; i >= 0; i -= 1)
1891 p += emit_str (p, aarch64_register (i, 1), sp,
1892 offset_memory_operand (i * 16));
1893
1894 /* Make space for 5 more cells.
1895
1896 SUB sp, sp, #(5 * 16)
1897
1898 */
1899 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1900
1901
1902 /* Save SP:
1903
1904 ADD x4, sp, #((32 + 31 + 5) * 16)
1905 STR x4, [sp, #(4 * 16)]
1906
1907 */
1908 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1909 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1910
1911 /* Save PC (tracepoint address):
1912
1913 MOV x3, #(tpaddr)
1914 ...
1915
1916 STR x3, [sp, #(3 * 16)]
1917
1918 */
1919
1920 p += emit_mov_addr (p, x3, tpaddr);
1921 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1922
1923 /* Save CPSR (NZCV), FPSR and FPCR:
1924
1925 MRS x2, nzcv
1926 MRS x1, fpsr
1927 MRS x0, fpcr
1928
1929 STR x2, [sp, #(2 * 16)]
1930 STR x1, [sp, #(1 * 16)]
1931 STR x0, [sp, #(0 * 16)]
1932
1933 */
1934 p += emit_mrs (p, x2, NZCV);
1935 p += emit_mrs (p, x1, FPSR);
1936 p += emit_mrs (p, x0, FPCR);
1937 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1938 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1939 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1940
1941 /* Push the collecting_t object. It consists of the address of the
1942 tracepoint and an ID for the current thread. We get the latter by
1943 reading the tpidr_el0 system register. It corresponds to the
1944 NT_ARM_TLS register accessible with ptrace.
1945
1946 MOV x0, #(tpoint)
1947 ...
1948
1949 MRS x1, tpidr_el0
1950
1951 STP x0, x1, [sp, #-16]!
1952
1953 */
1954
1955 p += emit_mov_addr (p, x0, tpoint);
1956 p += emit_mrs (p, x1, TPIDR_EL0);
1957 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1958
1959 /* Spin-lock:
1960
1961 The shared memory for the lock is at lockaddr. It will hold zero
1962 if no-one is holding the lock, otherwise it contains the address of
1963 the collecting_t object on the stack of the thread which acquired it.
1964
1965 At this stage, the stack pointer points to this thread's collecting_t
1966 object.
1967
1968 We use the following registers:
1969 - x0: Address of the lock.
1970 - x1: Pointer to collecting_t object.
1971 - x2: Scratch register.
1972
1973 MOV x0, #(lockaddr)
1974 ...
1975 MOV x1, sp
1976
1977 ; Trigger an event local to this core. So the following WFE
1978 ; instruction is ignored.
1979 SEVL
1980 again:
1981 ; Wait for an event. The event is triggered by either the SEVL
1982 ; or STLR instructions (store release).
1983 WFE
1984
1985 ; Atomically read at lockaddr. This marks the memory location as
1986 ; exclusive. This instruction also has memory constraints which
1987 ; make sure all previous data reads and writes are done before
1988 ; executing it.
1989 LDAXR x2, [x0]
1990
1991 ; Try again if another thread holds the lock.
1992 CBNZ x2, again
1993
1994 ; We can lock it! Write the address of the collecting_t object.
1995 ; This instruction will fail if the memory location is not marked
1996 ; as exclusive anymore. If it succeeds, it will remove the
1997 ; exclusive mark on the memory location. This way, if another
1998 ; thread executes this instruction before us, we will fail and try
1999 ; all over again.
2000 STXR w2, x1, [x0]
2001 CBNZ w2, again
2002
2003 */
2004
2005 p += emit_mov_addr (p, x0, lockaddr);
2006 p += emit_mov (p, x1, register_operand (sp));
2007
2008 p += emit_sevl (p);
2009 p += emit_wfe (p);
2010 p += emit_ldaxr (p, x2, x0);
2011 p += emit_cb (p, 1, w2, -2 * 4);
2012 p += emit_stxr (p, w2, x1, x0);
2013 p += emit_cb (p, 1, x2, -4 * 4);
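 /* As a rough C sketch of the acquisition loop emitted above (GCC
 atomic builtins are used purely as illustration; the inferior
 executes the instructions above, never this code):
 
 uintptr_t expected;
 do
 {
 wfe ();
 expected = 0;
 }
 while (!__atomic_compare_exchange_n ((uintptr_t *) lockaddr,
 &expected, (uintptr_t) &obj,
 0, __ATOMIC_ACQUIRE,
 __ATOMIC_RELAXED));
 
 Here wfe () stands for the WFE instruction and obj is this
 thread's collecting_t object. */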
2014
2015 /* Call collector (struct tracepoint *, unsigned char *):
2016
2017 MOV x0, #(tpoint)
2018 ...
2019
2020 ; Saved registers start after the collecting_t object.
2021 ADD x1, sp, #16
2022
2023 ; We use an intra-procedure-call scratch register.
2024 MOV ip0, #(collector)
2025 ...
2026
2027 ; And call back to C!
2028 BLR ip0
2029
2030 */
2031
2032 p += emit_mov_addr (p, x0, tpoint);
2033 p += emit_add (p, x1, sp, immediate_operand (16));
2034
2035 p += emit_mov_addr (p, ip0, collector);
2036 p += emit_blr (p, ip0);
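 /* Under the AAPCS64, x0 and x1 carry the two arguments, and ip0
 (x16) is the intra-procedure-call corruptible register, so
 clobbering it with the collector's address is safe at a call
 boundary. */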
2037
2038 /* Release the lock.
2039
2040 MOV x0, #(lockaddr)
2041 ...
2042
2043 ; This instruction is a normal store with memory ordering
2044 ; constraints. Thanks to this we do not have to put a data
2045 ; barrier instruction to make sure all data reads and writes are done
2046 ; before this instruction is executed. Furthermore, this instruction
2047 ; will trigger an event, letting other threads know they can grab
2048 ; the lock.
2049 STLR xzr, [x0]
2050
2051 */
2052 p += emit_mov_addr (p, x0, lockaddr);
2053 p += emit_stlr (p, xzr, x0);
2054
2055 /* Free collecting_t object:
2056
2057 ADD sp, sp, #16
2058
2059 */
2060 p += emit_add (p, sp, sp, immediate_operand (16));
2061
2062 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2063 registers from the stack.
2064
2065 LDR x2, [sp, #(2 * 16)]
2066 LDR x1, [sp, #(1 * 16)]
2067 LDR x0, [sp, #(0 * 16)]
2068
2069 MSR NZCV, x2
2070 MSR FPSR, x1
2071 MSR FPCR, x0
2072
2073 ADD sp, sp, #(5 * 16)
2074
2075 */
2076 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2077 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2078 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2079 p += emit_msr (p, NZCV, x2);
2080 p += emit_msr (p, FPSR, x1);
2081 p += emit_msr (p, FPCR, x0);
2082
2083 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2084
2085 /* Pop general purpose registers:
2086
2087 LDR x0, [sp]
2088 ...
2089 LDR x30, [sp, #(30 * 16)]
2090
2091 ADD sp, sp, #(31 * 16)
2092
2093 */
2094 for (i = 0; i <= 30; i += 1)
2095 p += emit_ldr (p, aarch64_register (i, 1), sp,
2096 offset_memory_operand (i * 16));
2097 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2098
2099 /* Pop SIMD&FP registers:
2100
2101 LDP q0, q1, [sp]
2102 ...
2103 LDP q30, q31, [sp, #(30 * 16)]
2104
2105 ADD sp, sp, #(32 * 16)
2106
2107 */
2108 for (i = 0; i <= 30; i += 2)
2109 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2110 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2111
2112 /* Write the code into the inferior memory. */
2113 append_insns (&buildaddr, p - buf, buf);
2114
2115 /* Now emit the relocated instruction. */
2116 *adjusted_insn_addr = buildaddr;
2117 target_read_uint32 (tpaddr, &insn);
2118
2119 insn_data.base.insn_addr = tpaddr;
2120 insn_data.new_addr = buildaddr;
2121 insn_data.insn_ptr = buf;
2122
2123 aarch64_relocate_instruction (insn, &visitor,
2124 (struct aarch64_insn_data *) &insn_data);
2125
2126 /* We may not have been able to relocate the instruction. */
2127 if (insn_data.insn_ptr == buf)
2128 {
2129 sprintf (err,
2130 "E.Could not relocate instruction from %s to %s.",
2131 core_addr_to_string_nz (tpaddr),
2132 core_addr_to_string_nz (buildaddr));
2133 return 1;
2134 }
2135 else
2136 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2137 *adjusted_insn_addr_end = buildaddr;
2138
2139 /* Go back to the start of the buffer. */
2140 p = buf;
2141
2142 /* Emit a branch back from the jump pad. */
2143 offset = (tpaddr + orig_size - buildaddr);
2144 if (!can_encode_int32 (offset, 28))
2145 {
2146 sprintf (err,
2147 "E.Jump back from jump pad too far from tracepoint "
2148 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2149 offset);
2150 return 1;
2151 }
2152
2153 p += emit_b (p, 0, offset);
2154 append_insns (&buildaddr, p - buf, buf);
2155
2156 /* Give the caller a branch instruction into the jump pad. */
2157 offset = (*jump_entry - tpaddr);
2158 if (!can_encode_int32 (offset, 28))
2159 {
2160 sprintf (err,
2161 "E.Jump pad too far from tracepoint "
2162 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2163 offset);
2164 return 1;
2165 }
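 /* Both range checks in this function enforce the +/-128 MiB reach
 of the A64 B instruction: a 26-bit signed immediate scaled by 4,
 i.e. a 28-bit signed byte offset. */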
2166
2167 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2168 *jjump_pad_insn_size = 4;
2169
2170 /* Return the end address of our pad. */
2171 *jump_entry = buildaddr;
2172
2173 return 0;
2174 }
2175
2176 /* Helper function writing LEN instructions from START into
2177 current_insn_ptr. */
2178
2179 static void
2180 emit_ops_insns (const uint32_t *start, int len)
2181 {
2182 CORE_ADDR buildaddr = current_insn_ptr;
2183
2184 if (debug_threads)
2185 debug_printf ("Adding %d instructions at %s\n",
2186 len, paddress (buildaddr));
2187
2188 append_insns (&buildaddr, len, start);
2189 current_insn_ptr = buildaddr;
2190 }
2191
2192 /* Pop a register from the stack. */
2193
2194 static int
2195 emit_pop (uint32_t *buf, struct aarch64_register rt)
2196 {
2197 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2198 }
2199
2200 /* Push a register on the stack. */
2201
2202 static int
2203 emit_push (uint32_t *buf, struct aarch64_register rt)
2204 {
2205 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2206 }
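 /* On A64 these two helpers expand to the post-indexed load
 "LDR rt, [sp], #16" and the pre-indexed store "STR rt, [sp, #-16]!",
 so the bytecode stack keeps the 16-byte cells and the SP alignment
 described in the jump pad layout above. */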
2207
2208 /* Implementation of emit_ops method "emit_prologue". */
2209
2210 static void
2211 aarch64_emit_prologue (void)
2212 {
2213 uint32_t buf[16];
2214 uint32_t *p = buf;
2215
2216 /* This function emits a prologue for the following function prototype:
2217
2218 enum eval_result_type f (unsigned char *regs,
2219 ULONGEST *value);
2220
2221 The first argument is a buffer of raw registers. The second
2222 argument is a pointer through which the result of evaluating the
2223 expression is returned; it is set to whatever is on top of the
2224 stack at the end.
2225
2226 The stack set up by the prologue is as such:
2227
2228 High *------------------------------------------------------*
2229 | LR |
2230 | FP | <- FP
2231 | x1 (ULONGEST *value) |
2232 | x0 (unsigned char *regs) |
2233 Low *------------------------------------------------------*
2234
2235 As we are implementing a stack machine, each opcode can expand the
2236 stack so we never know how far we are from the data saved by this
2237 prologue. In order to be able to refer to value and regs later, we save
2238 the current stack pointer in the frame pointer. This way, it is not
2239 clobbered when calling C functions.
2240
2241 Finally, throughout every operation, we use register x0 as the
2242 top of the stack, and x1 as a scratch register. */
2243
2244 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2245 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2246 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2247
2248 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
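 /* For reference, the prologue emitted above disassembles to:
 
 STP x0, x1, [sp, #-32]!
 STR x30, [sp, #24]
 STR x29, [sp, #16]
 ADD x29, sp, #16
 */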
2249
2250
2251 emit_ops_insns (buf, p - buf);
2252 }
2253
2254 /* Implementation of emit_ops method "emit_epilogue". */
2255
2256 static void
2257 aarch64_emit_epilogue (void)
2258 {
2259 uint32_t buf[16];
2260 uint32_t *p = buf;
2261
2262 /* Store the result of the expression (x0) in *value. */
2263 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2264 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2265 p += emit_str (p, x0, x1, offset_memory_operand (0));
2266
2267 /* Restore the previous state. */
2268 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2269 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2270
2271 /* Return expr_eval_no_error. */
2272 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2273 p += emit_ret (p, lr);
2274
2275 emit_ops_insns (buf, p - buf);
2276 }
2277
2278 /* Implementation of emit_ops method "emit_add". */
2279
2280 static void
2281 aarch64_emit_add (void)
2282 {
2283 uint32_t buf[16];
2284 uint32_t *p = buf;
2285
2286 p += emit_pop (p, x1);
2287 p += emit_add (p, x0, x1, register_operand (x0));
2288
2289 emit_ops_insns (buf, p - buf);
2290 }
2291
2292 /* Implementation of emit_ops method "emit_sub". */
2293
2294 static void
2295 aarch64_emit_sub (void)
2296 {
2297 uint32_t buf[16];
2298 uint32_t *p = buf;
2299
2300 p += emit_pop (p, x1);
2301 p += emit_sub (p, x0, x1, register_operand (x0));
2302
2303 emit_ops_insns (buf, p - buf);
2304 }
2305
2306 /* Implementation of emit_ops method "emit_mul". */
2307
2308 static void
2309 aarch64_emit_mul (void)
2310 {
2311 uint32_t buf[16];
2312 uint32_t *p = buf;
2313
2314 p += emit_pop (p, x1);
2315 p += emit_mul (p, x0, x1, x0);
2316
2317 emit_ops_insns (buf, p - buf);
2318 }
2319
2320 /* Implementation of emit_ops method "emit_lsh". */
2321
2322 static void
2323 aarch64_emit_lsh (void)
2324 {
2325 uint32_t buf[16];
2326 uint32_t *p = buf;
2327
2328 p += emit_pop (p, x1);
2329 p += emit_lslv (p, x0, x1, x0);
2330
2331 emit_ops_insns (buf, p - buf);
2332 }
2333
2334 /* Implementation of emit_ops method "emit_rsh_signed". */
2335
2336 static void
2337 aarch64_emit_rsh_signed (void)
2338 {
2339 uint32_t buf[16];
2340 uint32_t *p = buf;
2341
2342 p += emit_pop (p, x1);
2343 p += emit_asrv (p, x0, x1, x0);
2344
2345 emit_ops_insns (buf, p - buf);
2346 }
2347
2348 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2349
2350 static void
2351 aarch64_emit_rsh_unsigned (void)
2352 {
2353 uint32_t buf[16];
2354 uint32_t *p = buf;
2355
2356 p += emit_pop (p, x1);
2357 p += emit_lsrv (p, x0, x1, x0);
2358
2359 emit_ops_insns (buf, p - buf);
2360 }
2361
2362 /* Implementation of emit_ops method "emit_ext". */
2363
2364 static void
2365 aarch64_emit_ext (int arg)
2366 {
2367 uint32_t buf[16];
2368 uint32_t *p = buf;
2369
2370 p += emit_sbfx (p, x0, x0, 0, arg);
2371
2372 emit_ops_insns (buf, p - buf);
2373 }
2374
2375 /* Implementation of emit_ops method "emit_log_not". */
2376
2377 static void
2378 aarch64_emit_log_not (void)
2379 {
2380 uint32_t buf[16];
2381 uint32_t *p = buf;
2382
2383 /* If the top of the stack is 0, replace it with 1. Else replace it with
2384 0. */
2385
2386 p += emit_cmp (p, x0, immediate_operand (0));
2387 p += emit_cset (p, x0, EQ);
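 /* CSET is an alias of CSINC with the inverted condition, so the
 pair above leaves exactly 1 in x0 when it was zero and 0
 otherwise. */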
2388
2389 emit_ops_insns (buf, p - buf);
2390 }
2391
2392 /* Implementation of emit_ops method "emit_bit_and". */
2393
2394 static void
2395 aarch64_emit_bit_and (void)
2396 {
2397 uint32_t buf[16];
2398 uint32_t *p = buf;
2399
2400 p += emit_pop (p, x1);
2401 p += emit_and (p, x0, x0, x1);
2402
2403 emit_ops_insns (buf, p - buf);
2404 }
2405
2406 /* Implementation of emit_ops method "emit_bit_or". */
2407
2408 static void
2409 aarch64_emit_bit_or (void)
2410 {
2411 uint32_t buf[16];
2412 uint32_t *p = buf;
2413
2414 p += emit_pop (p, x1);
2415 p += emit_orr (p, x0, x0, x1);
2416
2417 emit_ops_insns (buf, p - buf);
2418 }
2419
2420 /* Implementation of emit_ops method "emit_bit_xor". */
2421
2422 static void
2423 aarch64_emit_bit_xor (void)
2424 {
2425 uint32_t buf[16];
2426 uint32_t *p = buf;
2427
2428 p += emit_pop (p, x1);
2429 p += emit_eor (p, x0, x0, x1);
2430
2431 emit_ops_insns (buf, p - buf);
2432 }
2433
2434 /* Implementation of emit_ops method "emit_bit_not". */
2435
2436 static void
2437 aarch64_emit_bit_not (void)
2438 {
2439 uint32_t buf[16];
2440 uint32_t *p = buf;
2441
2442 p += emit_mvn (p, x0, x0);
2443
2444 emit_ops_insns (buf, p - buf);
2445 }
2446
2447 /* Implementation of emit_ops method "emit_equal". */
2448
2449 static void
2450 aarch64_emit_equal (void)
2451 {
2452 uint32_t buf[16];
2453 uint32_t *p = buf;
2454
2455 p += emit_pop (p, x1);
2456 p += emit_cmp (p, x0, register_operand (x1));
2457 p += emit_cset (p, x0, EQ);
2458
2459 emit_ops_insns (buf, p - buf);
2460 }
2461
2462 /* Implementation of emit_ops method "emit_less_signed". */
2463
2464 static void
2465 aarch64_emit_less_signed (void)
2466 {
2467 uint32_t buf[16];
2468 uint32_t *p = buf;
2469
2470 p += emit_pop (p, x1);
2471 p += emit_cmp (p, x1, register_operand (x0));
2472 p += emit_cset (p, x0, LT);
2473
2474 emit_ops_insns (buf, p - buf);
2475 }
2476
2477 /* Implementation of emit_ops method "emit_less_unsigned". */
2478
2479 static void
2480 aarch64_emit_less_unsigned (void)
2481 {
2482 uint32_t buf[16];
2483 uint32_t *p = buf;
2484
2485 p += emit_pop (p, x1);
2486 p += emit_cmp (p, x1, register_operand (x0));
2487 p += emit_cset (p, x0, LO);
2488
2489 emit_ops_insns (buf, p - buf);
2490 }
2491
2492 /* Implementation of emit_ops method "emit_ref". */
2493
2494 static void
2495 aarch64_emit_ref (int size)
2496 {
2497 uint32_t buf[16];
2498 uint32_t *p = buf;
2499
2500 switch (size)
2501 {
2502 case 1:
2503 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2504 break;
2505 case 2:
2506 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2507 break;
2508 case 4:
2509 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2510 break;
2511 case 8:
2512 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2513 break;
2514 default:
2515 /* Unknown size, bail on compilation. */
2516 emit_error = 1;
2517 break;
2518 }
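 /* All four loads zero-extend: LDRB and LDRH clear the upper bits of
 w0, and writing w0 clears bits 63:32 of x0. Sign extension, when
 the expression needs it, is performed separately by emit_ext. */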
2519
2520 emit_ops_insns (buf, p - buf);
2521 }
2522
2523 /* Implementation of emit_ops method "emit_if_goto". */
2524
2525 static void
2526 aarch64_emit_if_goto (int *offset_p, int *size_p)
2527 {
2528 uint32_t buf[16];
2529 uint32_t *p = buf;
2530
2531 /* The Z flag is set or cleared here. */
2532 p += emit_cmp (p, x0, immediate_operand (0));
2533 /* This instruction must not change the Z flag. */
2534 p += emit_pop (p, x0);
2535 /* Branch over the next instruction if x0 == 0. */
2536 p += emit_bcond (p, EQ, 8);
2537
2538 /* The NOP instruction will be patched with an unconditional branch. */
2539 if (offset_p)
2540 *offset_p = (p - buf) * 4;
2541 if (size_p)
2542 *size_p = 4;
2543 p += emit_nop (p);
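 /* The offset recorded above identifies this NOP; once the goto
 target is known, aarch64_write_goto_address (below) overwrites
 the NOP with an unconditional B instruction. */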
2544
2545 emit_ops_insns (buf, p - buf);
2546 }
2547
2548 /* Implementation of emit_ops method "emit_goto". */
2549
2550 static void
2551 aarch64_emit_goto (int *offset_p, int *size_p)
2552 {
2553 uint32_t buf[16];
2554 uint32_t *p = buf;
2555
2556 /* The NOP instruction will be patched with an unconditional branch. */
2557 if (offset_p)
2558 *offset_p = 0;
2559 if (size_p)
2560 *size_p = 4;
2561 p += emit_nop (p);
2562
2563 emit_ops_insns (buf, p - buf);
2564 }
2565
2566 /* Implementation of emit_ops method "write_goto_address". */
2567
2568 static void
2569 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2570 {
2571 uint32_t insn;
2572
2573 emit_b (&insn, 0, to - from);
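 /* This presumably relies on TO - FROM fitting in the signed 28-bit
 range of the B instruction (+/-128 MiB); gotos patched here stay
 within one generated code buffer, so the offset is small. */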
2574 append_insns (&from, 1, &insn);
2575 }
2576
2577 /* Implementation of emit_ops method "emit_const". */
2578
2579 static void
2580 aarch64_emit_const (LONGEST num)
2581 {
2582 uint32_t buf[16];
2583 uint32_t *p = buf;
2584
2585 p += emit_mov_addr (p, x0, num);
2586
2587 emit_ops_insns (buf, p - buf);
2588 }
2589
2590 /* Implementation of emit_ops method "emit_call". */
2591
2592 static void
2593 aarch64_emit_call (CORE_ADDR fn)
2594 {
2595 uint32_t buf[16];
2596 uint32_t *p = buf;
2597
2598 p += emit_mov_addr (p, ip0, fn);
2599 p += emit_blr (p, ip0);
2600
2601 emit_ops_insns (buf, p - buf);
2602 }
2603
2604 /* Implementation of emit_ops method "emit_reg". */
2605
2606 static void
2607 aarch64_emit_reg (int reg)
2608 {
2609 uint32_t buf[16];
2610 uint32_t *p = buf;
2611
2612 /* Set x0 to unsigned char *regs. */
2613 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2614 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2615 p += emit_mov (p, x1, immediate_operand (reg));
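 /* Per the AAPCS64 these land in the first two argument registers.
 The called IPA-side helper is assumed to return the register's
 value in x0, which then becomes the new top of the bytecode
 stack. */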
2616
2617 emit_ops_insns (buf, p - buf);
2618
2619 aarch64_emit_call (get_raw_reg_func_addr ());
2620 }
2621
2622 /* Implementation of emit_ops method "emit_pop". */
2623
2624 static void
2625 aarch64_emit_pop (void)
2626 {
2627 uint32_t buf[16];
2628 uint32_t *p = buf;
2629
2630 p += emit_pop (p, x0);
2631
2632 emit_ops_insns (buf, p - buf);
2633 }
2634
2635 /* Implementation of emit_ops method "emit_stack_flush". */
2636
2637 static void
2638 aarch64_emit_stack_flush (void)
2639 {
2640 uint32_t buf[16];
2641 uint32_t *p = buf;
2642
2643 p += emit_push (p, x0);
2644
2645 emit_ops_insns (buf, p - buf);
2646 }
2647
2648 /* Implementation of emit_ops method "emit_zero_ext". */
2649
2650 static void
2651 aarch64_emit_zero_ext (int arg)
2652 {
2653 uint32_t buf[16];
2654 uint32_t *p = buf;
2655
2656 p += emit_ubfx (p, x0, x0, 0, arg);
2657
2658 emit_ops_insns (buf, p - buf);
2659 }
2660
2661 /* Implementation of emit_ops method "emit_swap". */
2662
2663 static void
2664 aarch64_emit_swap (void)
2665 {
2666 uint32_t buf[16];
2667 uint32_t *p = buf;
2668
2669 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2670 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2671 p += emit_mov (p, x0, register_operand (x1));
2672
2673 emit_ops_insns (buf, p - buf);
2674 }
2675
2676 /* Implementation of emit_ops method "emit_stack_adjust". */
2677
2678 static void
2679 aarch64_emit_stack_adjust (int n)
2680 {
2681 /* This is not needed with our design. */
2682 uint32_t buf[16];
2683 uint32_t *p = buf;
2684
2685 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2686
2687 emit_ops_insns (buf, p - buf);
2688 }
2689
2690 /* Implementation of emit_ops method "emit_int_call_1". */
2691
2692 static void
2693 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2694 {
2695 uint32_t buf[16];
2696 uint32_t *p = buf;
2697
2698 p += emit_mov (p, x0, immediate_operand (arg1));
2699
2700 emit_ops_insns (buf, p - buf);
2701
2702 aarch64_emit_call (fn);
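 /* FN's integer result comes back in x0 per the AAPCS64, which is
 also our top-of-stack register, so the result needs no extra
 moves; unlike aarch64_emit_void_call_2 below, nothing has to be
 saved around the call. */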
2703 }
2704
2705 /* Implementation of emit_ops method "emit_void_call_2". */
2706
2707 static void
2708 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2709 {
2710 uint32_t buf[16];
2711 uint32_t *p = buf;
2712
2713 /* Push x0 on the stack. */
2714 aarch64_emit_stack_flush ();
2715
2716 /* Set up arguments for the function call:
2717
2718 x0: arg1
2719 x1: top of the stack
2720
2721 MOV x1, x0
2722 MOV x0, #arg1 */
2723
2724 p += emit_mov (p, x1, register_operand (x0));
2725 p += emit_mov (p, x0, immediate_operand (arg1));
2726
2727 emit_ops_insns (buf, p - buf);
2728
2729 aarch64_emit_call (fn);
2730
2731 /* Restore x0. */
2732 aarch64_emit_pop ();
2733 }
2734
2735 /* Implementation of emit_ops method "emit_eq_goto". */
2736
2737 static void
2738 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2739 {
2740 uint32_t buf[16];
2741 uint32_t *p = buf;
2742
2743 p += emit_pop (p, x1);
2744 p += emit_cmp (p, x1, register_operand (x0));
2745 /* Branch over the next instruction if x0 != x1. */
2746 p += emit_bcond (p, NE, 8);
2747 /* The NOP instruction will be patched with an unconditional branch. */
2748 if (offset_p)
2749 *offset_p = (p - buf) * 4;
2750 if (size_p)
2751 *size_p = 4;
2752 p += emit_nop (p);
2753
2754 emit_ops_insns (buf, p - buf);
2755 }
2756
2757 /* Implementation of emit_ops method "emit_ne_goto". */
2758
2759 static void
2760 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2761 {
2762 uint32_t buf[16];
2763 uint32_t *p = buf;
2764
2765 p += emit_pop (p, x1);
2766 p += emit_cmp (p, x1, register_operand (x0));
2767 /* Branch over the next instruction if x0 == x1. */
2768 p += emit_bcond (p, EQ, 8);
2769 /* The NOP instruction will be patched with an unconditional branch. */
2770 if (offset_p)
2771 *offset_p = (p - buf) * 4;
2772 if (size_p)
2773 *size_p = 4;
2774 p += emit_nop (p);
2775
2776 emit_ops_insns (buf, p - buf);
2777 }
2778
2779 /* Implementation of emit_ops method "emit_lt_goto". */
2780
2781 static void
2782 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2783 {
2784 uint32_t buf[16];
2785 uint32_t *p = buf;
2786
2787 p += emit_pop (p, x1);
2788 p += emit_cmp (p, x1, register_operand (x0));
2789 /* Branch over the next instruction if x0 >= x1. */
2790 p += emit_bcond (p, GE, 8);
2791 /* The NOP instruction will be patched with an unconditional branch. */
2792 if (offset_p)
2793 *offset_p = (p - buf) * 4;
2794 if (size_p)
2795 *size_p = 4;
2796 p += emit_nop (p);
2797
2798 emit_ops_insns (buf, p - buf);
2799 }
2800
2801 /* Implementation of emit_ops method "emit_le_goto". */
2802
2803 static void
2804 aarch64_emit_le_goto (int *offset_p, int *size_p)
2805 {
2806 uint32_t buf[16];
2807 uint32_t *p = buf;
2808
2809 p += emit_pop (p, x1);
2810 p += emit_cmp (p, x1, register_operand (x0));
2811 /* Branch over the next instruction if x0 > x1. */
2812 p += emit_bcond (p, GT, 8);
2813 /* The NOP instruction will be patched with an unconditional branch. */
2814 if (offset_p)
2815 *offset_p = (p - buf) * 4;
2816 if (size_p)
2817 *size_p = 4;
2818 p += emit_nop (p);
2819
2820 emit_ops_insns (buf, p - buf);
2821 }
2822
2823 /* Implementation of emit_ops method "emit_gt_goto". */
2824
2825 static void
2826 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2827 {
2828 uint32_t buf[16];
2829 uint32_t *p = buf;
2830
2831 p += emit_pop (p, x1);
2832 p += emit_cmp (p, x1, register_operand (x0));
2833 /* Branch over the next instruction if x0 <= x1. */
2834 p += emit_bcond (p, LE, 8);
2835 /* The NOP instruction will be patched with an unconditional branch. */
2836 if (offset_p)
2837 *offset_p = (p - buf) * 4;
2838 if (size_p)
2839 *size_p = 4;
2840 p += emit_nop (p);
2841
2842 emit_ops_insns (buf, p - buf);
2843 }
2844
2845 /* Implementation of emit_ops method "emit_ge_got". */
2846
2847 static void
2848 aarch64_emit_ge_got (int *offset_p, int *size_p)
2849 {
2850 uint32_t buf[16];
2851 uint32_t *p = buf;
2852
2853 p += emit_pop (p, x1);
2854 p += emit_cmp (p, x1, register_operand (x0));
2855 /* Branch over the next instruction if x0 < x1. */
2856 p += emit_bcond (p, LT, 8);
2857 /* The NOP instruction will be patched with an unconditional branch. */
2858 if (offset_p)
2859 *offset_p = (p - buf) * 4;
2860 if (size_p)
2861 *size_p = 4;
2862 p += emit_nop (p);
2863
2864 emit_ops_insns (buf, p - buf);
2865 }
2866
2867 static struct emit_ops aarch64_emit_ops_impl =
2868 {
2869 aarch64_emit_prologue,
2870 aarch64_emit_epilogue,
2871 aarch64_emit_add,
2872 aarch64_emit_sub,
2873 aarch64_emit_mul,
2874 aarch64_emit_lsh,
2875 aarch64_emit_rsh_signed,
2876 aarch64_emit_rsh_unsigned,
2877 aarch64_emit_ext,
2878 aarch64_emit_log_not,
2879 aarch64_emit_bit_and,
2880 aarch64_emit_bit_or,
2881 aarch64_emit_bit_xor,
2882 aarch64_emit_bit_not,
2883 aarch64_emit_equal,
2884 aarch64_emit_less_signed,
2885 aarch64_emit_less_unsigned,
2886 aarch64_emit_ref,
2887 aarch64_emit_if_goto,
2888 aarch64_emit_goto,
2889 aarch64_write_goto_address,
2890 aarch64_emit_const,
2891 aarch64_emit_call,
2892 aarch64_emit_reg,
2893 aarch64_emit_pop,
2894 aarch64_emit_stack_flush,
2895 aarch64_emit_zero_ext,
2896 aarch64_emit_swap,
2897 aarch64_emit_stack_adjust,
2898 aarch64_emit_int_call_1,
2899 aarch64_emit_void_call_2,
2900 aarch64_emit_eq_goto,
2901 aarch64_emit_ne_goto,
2902 aarch64_emit_lt_goto,
2903 aarch64_emit_le_goto,
2904 aarch64_emit_gt_goto,
2905 aarch64_emit_ge_got,
2906 };
2907
2908 /* Implementation of linux_target_ops method "emit_ops". */
2909
2910 static struct emit_ops *
2911 aarch64_emit_ops (void)
2912 {
2913 return &aarch64_emit_ops_impl;
2914 }
2915
2916 /* Implementation of linux_target_ops method
2917 "get_min_fast_tracepoint_insn_len". */
2918
2919 static int
2920 aarch64_get_min_fast_tracepoint_insn_len (void)
2921 {
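 /* All A64 instructions are 4 bytes, and the jump into the pad
 emitted by aarch64_install_fast_tracepoint_jump_pad is a single
 B instruction, so 4 bytes at the tracepoint site suffice. */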
2922 return 4;
2923 }
2924
2925 /* Implementation of linux_target_ops method "supports_range_stepping". */
2926
2927 static int
2928 aarch64_supports_range_stepping (void)
2929 {
2930 return 1;
2931 }
2932
2933 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2934
2935 static const gdb_byte *
2936 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2937 {
2938 if (is_64bit_tdesc ())
2939 {
2940 *size = aarch64_breakpoint_len;
2941 return aarch64_breakpoint;
2942 }
2943 else
2944 return arm_sw_breakpoint_from_kind (kind, size);
2945 }
2946
2947 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2948
2949 static int
2950 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2951 {
2952 if (is_64bit_tdesc ())
2953 return aarch64_breakpoint_len;
2954 else
2955 return arm_breakpoint_kind_from_pc (pcptr);
2956 }
2957
2958 /* Implementation of the linux_target_ops method
2959 "breakpoint_kind_from_current_state". */
2960
2961 static int
2962 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2963 {
2964 if (is_64bit_tdesc ())
2965 return aarch64_breakpoint_len;
2966 else
2967 return arm_breakpoint_kind_from_current_state (pcptr);
2968 }
2969
2970 /* Support for hardware single step. */
2971
2972 static int
2973 aarch64_supports_hardware_single_step (void)
2974 {
2975 return 1;
2976 }
2977
2978 struct linux_target_ops the_low_target =
2979 {
2980 aarch64_arch_setup,
2981 aarch64_regs_info,
2982 aarch64_cannot_fetch_register,
2983 aarch64_cannot_store_register,
2984 NULL, /* fetch_register */
2985 aarch64_get_pc,
2986 aarch64_set_pc,
2987 aarch64_breakpoint_kind_from_pc,
2988 aarch64_sw_breakpoint_from_kind,
2989 NULL, /* get_next_pcs */
2990 0, /* decr_pc_after_break */
2991 aarch64_breakpoint_at,
2992 aarch64_supports_z_point_type,
2993 aarch64_insert_point,
2994 aarch64_remove_point,
2995 aarch64_stopped_by_watchpoint,
2996 aarch64_stopped_data_address,
2997 NULL, /* collect_ptrace_register */
2998 NULL, /* supply_ptrace_register */
2999 aarch64_linux_siginfo_fixup,
3000 aarch64_linux_new_process,
3001 aarch64_linux_delete_process,
3002 aarch64_linux_new_thread,
3003 aarch64_linux_delete_thread,
3004 aarch64_linux_new_fork,
3005 aarch64_linux_prepare_to_resume,
3006 NULL, /* process_qsupported */
3007 aarch64_supports_tracepoints,
3008 aarch64_get_thread_area,
3009 aarch64_install_fast_tracepoint_jump_pad,
3010 aarch64_emit_ops,
3011 aarch64_get_min_fast_tracepoint_insn_len,
3012 aarch64_supports_range_stepping,
3013 aarch64_breakpoint_kind_from_current_state,
3014 aarch64_supports_hardware_single_step,
3015 aarch64_get_syscall_trapinfo,
3016 };
3017
3018 void
3019 initialize_low_arch (void)
3020 {
3021 init_registers_aarch64 ();
3022
3023 initialize_low_arch_aarch32 ();
3024
3025 initialize_regsets_info (&aarch64_regsets_info);
3026 }