gdbserver/linux-low: turn 'breakpoint_kind_from_{pc, current_state}' into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-aarch64-low.cc
1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
46 #include "tdesc.h"
47
48 #ifdef HAVE_SYS_REG_H
49 #include <sys/reg.h>
50 #endif
51
52 /* Linux target op definitions for the AArch64 architecture. */
53
54 class aarch64_target : public linux_process_target
55 {
56 public:
57
58 const regs_info *get_regs_info () override;
59
60 int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;
61
62 int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;
63
64 protected:
65
66 void low_arch_setup () override;
67
68 bool low_cannot_fetch_register (int regno) override;
69
70 bool low_cannot_store_register (int regno) override;
71
72 bool low_supports_breakpoints () override;
73
74 CORE_ADDR low_get_pc (regcache *regcache) override;
75
76 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
77 };
78
79 /* The singleton target ops object. */
80
81 static aarch64_target the_aarch64_target;
82
83 bool
84 aarch64_target::low_cannot_fetch_register (int regno)
85 {
86 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
87 "is not implemented by the target");
88 }
89
90 bool
91 aarch64_target::low_cannot_store_register (int regno)
92 {
93 gdb_assert_not_reached ("linux target op low_cannot_store_register "
94 "is not implemented by the target");
95 }
96
97 /* Per-process arch-specific data we want to keep. */
98
99 struct arch_process_info
100 {
101 /* Hardware breakpoint/watchpoint data.
102 This data is per-process rather than per-thread because of the
103 lack of information in the gdbserver environment; gdbserver is
104 not told whether a requested hardware breakpoint/watchpoint is
105 thread specific or not, so it has to set each hw bp/wp for every
106 thread in the current process. The higher level bp/wp management
107 in gdb will resume a thread if a hw bp/wp trap is not expected
108 for it. Since the hw bp/wp setting is the same for each thread,
109 it is reasonable for the data to live here.
110 */
111 struct aarch64_debug_reg_state debug_reg_state;
112 };
113
114 /* Return true if the size of register 0 is 8 bytes. */
115
116 static int
117 is_64bit_tdesc (void)
118 {
119 struct regcache *regcache = get_thread_regcache (current_thread, 0);
120
121 return register_size (regcache->tdesc, 0) == 8;
122 }
123
124 /* Return true if the regcache's target description contains the SVE feature. */
125
126 static bool
127 is_sve_tdesc (void)
128 {
129 struct regcache *regcache = get_thread_regcache (current_thread, 0);
130
131 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
132 }
133
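/* Collect the general purpose registers from REGCACHE into the ptrace
   general register set BUF. */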
134 static void
135 aarch64_fill_gregset (struct regcache *regcache, void *buf)
136 {
137 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
138 int i;
139
140 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
141 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
142 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
143 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
144 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
145 }
146
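/* Supply the general purpose registers in the ptrace register set BUF
   to REGCACHE. */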
147 static void
148 aarch64_store_gregset (struct regcache *regcache, const void *buf)
149 {
150 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
151 int i;
152
153 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
154 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
155 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
156 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
157 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
158 }
159
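/* Collect the SIMD&FP registers from REGCACHE into BUF. */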
160 static void
161 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
162 {
163 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
164 int i;
165
166 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
167 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
168 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
169 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
170 }
171
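/* Supply the SIMD&FP registers in BUF to REGCACHE. */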
172 static void
173 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
174 {
175 const struct user_fpsimd_state *regset
176 = (const struct user_fpsimd_state *) buf;
177 int i;
178
179 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
180 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
181 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
182 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
183 }
184
185 /* Store the pauth registers to regcache. */
186
187 static void
188 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
189 {
190 uint64_t *pauth_regset = (uint64_t *) buf;
191 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
192
193 if (pauth_base == 0)
194 return;
195
196 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
197 &pauth_regset[0]);
198 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
199 &pauth_regset[1]);
200 }
201
202 bool
203 aarch64_target::low_supports_breakpoints ()
204 {
205 return true;
206 }
207
208 /* Implementation of linux target ops method "low_get_pc". */
209
210 CORE_ADDR
211 aarch64_target::low_get_pc (regcache *regcache)
212 {
213 if (register_size (regcache->tdesc, 0) == 8)
214 return linux_get_pc_64bit (regcache);
215 else
216 return linux_get_pc_32bit (regcache);
217 }
218
219 /* Implementation of linux target ops method "low_set_pc". */
220
221 void
222 aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
223 {
224 if (register_size (regcache->tdesc, 0) == 8)
225 linux_set_pc_64bit (regcache, pc);
226 else
227 linux_set_pc_32bit (regcache, pc);
228 }
229
230 #define aarch64_breakpoint_len 4
231
232 /* AArch64 BRK software debug mode instruction.
233 This instruction needs to match gdb/aarch64-tdep.c
234 (aarch64_default_breakpoint). */
235 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
236
237 /* Implementation of linux_target_ops method "breakpoint_at". */
238
239 static int
240 aarch64_breakpoint_at (CORE_ADDR where)
241 {
242 if (is_64bit_tdesc ())
243 {
244 gdb_byte insn[aarch64_breakpoint_len];
245
246 the_target->read_memory (where, (unsigned char *) &insn,
247 aarch64_breakpoint_len);
248 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
249 return 1;
250
251 return 0;
252 }
253 else
254 return arm_breakpoint_at (where);
255 }
256
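/* Clear the per-process mirror of the hardware breakpoint and
   watchpoint debug registers in STATE. */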
257 static void
258 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
259 {
260 int i;
261
262 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
263 {
264 state->dr_addr_bp[i] = 0;
265 state->dr_ctrl_bp[i] = 0;
266 state->dr_ref_count_bp[i] = 0;
267 }
268
269 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
270 {
271 state->dr_addr_wp[i] = 0;
272 state->dr_ctrl_wp[i] = 0;
273 state->dr_ref_count_wp[i] = 0;
274 }
275 }
276
277 /* Return the pointer to the debug register state structure in the
278 current process' arch-specific data area. */
279
280 struct aarch64_debug_reg_state *
281 aarch64_get_debug_reg_state (pid_t pid)
282 {
283 struct process_info *proc = find_process_pid (pid);
284
285 return &proc->priv->arch_private->debug_reg_state;
286 }
287
288 /* Implementation of linux_target_ops method "supports_z_point_type". */
289
290 static int
291 aarch64_supports_z_point_type (char z_type)
292 {
293 switch (z_type)
294 {
295 case Z_PACKET_SW_BP:
296 case Z_PACKET_HW_BP:
297 case Z_PACKET_WRITE_WP:
298 case Z_PACKET_READ_WP:
299 case Z_PACKET_ACCESS_WP:
300 return 1;
301 default:
302 return 0;
303 }
304 }
305
306 /* Implementation of linux_target_ops method "insert_point".
307
308 It actually only records the info of the to-be-inserted bp/wp;
309 the actual insertion will happen when threads are resumed. */
310
311 static int
312 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
313 int len, struct raw_breakpoint *bp)
314 {
315 int ret;
316 enum target_hw_bp_type targ_type;
317 struct aarch64_debug_reg_state *state
318 = aarch64_get_debug_reg_state (pid_of (current_thread));
319
320 if (show_debug_regs)
321 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
322 (unsigned long) addr, len);
323
324 /* Determine the type from the raw breakpoint type. */
325 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
326
327 if (targ_type != hw_execute)
328 {
329 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
330 ret = aarch64_handle_watchpoint (targ_type, addr, len,
331 1 /* is_insert */, state);
332 else
333 ret = -1;
334 }
335 else
336 {
337 if (len == 3)
338 {
339 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
340 instruction. Set it to 2 to correctly encode the length bit
341 mask in the hardware breakpoint/watchpoint control register. */
342 len = 2;
343 }
344 ret = aarch64_handle_breakpoint (targ_type, addr, len,
345 1 /* is_insert */, state);
346 }
347
348 if (show_debug_regs)
349 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
350 targ_type);
351
352 return ret;
353 }
354
355 /* Implementation of linux_target_ops method "remove_point".
356
357 It actually only records the info of the to-be-removed bp/wp;
358 the actual removal will be done when threads are resumed. */
359
360 static int
361 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
362 int len, struct raw_breakpoint *bp)
363 {
364 int ret;
365 enum target_hw_bp_type targ_type;
366 struct aarch64_debug_reg_state *state
367 = aarch64_get_debug_reg_state (pid_of (current_thread));
368
369 if (show_debug_regs)
370 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
371 (unsigned long) addr, len);
372
373 /* Determine the type from the raw breakpoint type. */
374 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
375
376 /* Set up state pointers. */
377 if (targ_type != hw_execute)
378 ret =
379 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
380 state);
381 else
382 {
383 if (len == 3)
384 {
385 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
386 instruction. Set it to 2 to correctly encode the length bit
387 mask in the hardware breakpoint/watchpoint control register. */
388 len = 2;
389 }
390 ret = aarch64_handle_breakpoint (targ_type, addr, len,
391 0 /* is_insert */, state);
392 }
393
394 if (show_debug_regs)
395 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
396 targ_type);
397
398 return ret;
399 }
400
401 /* Implementation of linux_target_ops method "stopped_data_address". */
402
403 static CORE_ADDR
404 aarch64_stopped_data_address (void)
405 {
406 siginfo_t siginfo;
407 int pid, i;
408 struct aarch64_debug_reg_state *state;
409
410 pid = lwpid_of (current_thread);
411
412 /* Get the siginfo. */
413 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
414 return (CORE_ADDR) 0;
415
416 /* Need to be a hardware breakpoint/watchpoint trap. */
417 if (siginfo.si_signo != SIGTRAP
418 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
419 return (CORE_ADDR) 0;
420
421 /* Check if the address matches any watched address. */
422 state = aarch64_get_debug_reg_state (pid_of (current_thread));
423 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
424 {
425 const unsigned int offset
426 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
427 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
428 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
429 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
430 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
431 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
432
433 if (state->dr_ref_count_wp[i]
434 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
435 && addr_trap >= addr_watch_aligned
436 && addr_trap < addr_watch + len)
437 {
438 /* ADDR_TRAP reports the first address of the memory range
439 accessed by the CPU, regardless of what was the memory
440 range watched. Thus, a large CPU access that straddles
441 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
442 ADDR_TRAP that is lower than the
443 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
444
445 addr: | 4 | 5 | 6 | 7 | 8 |
446 |---- range watched ----|
447 |----------- range accessed ------------|
448
449 In this case, ADDR_TRAP will be 4.
450
451 To match a watchpoint known to GDB core, we must never
452 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
453 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
454 positive on kernels older than 4.10. See PR
455 external/20207. */
456 return addr_orig;
457 }
458 }
459
460 return (CORE_ADDR) 0;
461 }
462
463 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
464
465 static int
466 aarch64_stopped_by_watchpoint (void)
467 {
468 if (aarch64_stopped_data_address () != 0)
469 return 1;
470 else
471 return 0;
472 }
473
474 /* Fetch the thread-local storage pointer for libthread_db. */
475
476 ps_err_e
477 ps_get_thread_area (struct ps_prochandle *ph,
478 lwpid_t lwpid, int idx, void **base)
479 {
480 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
481 is_64bit_tdesc ());
482 }
483
484 /* Implementation of linux_target_ops method "siginfo_fixup". */
485
486 static int
487 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
488 {
489 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
490 if (!is_64bit_tdesc ())
491 {
492 if (direction == 0)
493 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
494 native);
495 else
496 aarch64_siginfo_from_compat_siginfo (native,
497 (struct compat_siginfo *) inf);
498
499 return 1;
500 }
501
502 return 0;
503 }
504
505 /* Implementation of linux_target_ops method "new_process". */
506
507 static struct arch_process_info *
508 aarch64_linux_new_process (void)
509 {
510 struct arch_process_info *info = XCNEW (struct arch_process_info);
511
512 aarch64_init_debug_reg_state (&info->debug_reg_state);
513
514 return info;
515 }
516
517 /* Implementation of linux_target_ops method "delete_process". */
518
519 static void
520 aarch64_linux_delete_process (struct arch_process_info *info)
521 {
522 xfree (info);
523 }
524
525 /* Implementation of linux_target_ops method "linux_new_fork". */
526
527 static void
528 aarch64_linux_new_fork (struct process_info *parent,
529 struct process_info *child)
530 {
531 /* These are allocated by linux_add_process. */
532 gdb_assert (parent->priv != NULL
533 && parent->priv->arch_private != NULL);
534 gdb_assert (child->priv != NULL
535 && child->priv->arch_private != NULL);
536
537 /* Linux kernels before 2.6.33 commit
538 72f674d203cd230426437cdcf7dd6f681dad8b0d
539 let the child inherit the hardware debug registers from the parent
540 on fork/vfork/clone. Newer Linux kernels create such tasks with
541 zeroed debug registers.
542
543 GDB core assumes the child inherits the watchpoints/hw
544 breakpoints of the parent, and will remove them all from the
545 forked off process. Copy the debug register mirrors into the
546 new process so that all breakpoints and watchpoints can be
547 removed together. The debug register mirrors will be zeroed
548 out before the forked off process is detached, thus making
549 this compatible with older Linux kernels too. */
550
551 *child->priv->arch_private = *parent->priv->arch_private;
552 }
553
554 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
555 #define AARCH64_HWCAP_PACA (1 << 30)
556
557 /* Implementation of linux target ops method "low_arch_setup". */
558
559 void
560 aarch64_target::low_arch_setup ()
561 {
562 unsigned int machine;
563 int is_elf64;
564 int tid;
565
566 tid = lwpid_of (current_thread);
567
568 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
569
570 if (is_elf64)
571 {
572 uint64_t vq = aarch64_sve_get_vq (tid);
573 unsigned long hwcap = linux_get_hwcap (8);
574 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
575
576 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
577 }
578 else
579 current_process ()->tdesc = aarch32_linux_read_description ();
580
581 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
582 }
583
584 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
585
586 static void
587 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
588 {
589 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
590 }
591
592 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
593
594 static void
595 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
596 {
597 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
598 }
599
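/* Register sets used for AArch64 targets without SVE state. */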
600 static struct regset_info aarch64_regsets[] =
601 {
602 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
603 sizeof (struct user_pt_regs), GENERAL_REGS,
604 aarch64_fill_gregset, aarch64_store_gregset },
605 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
606 sizeof (struct user_fpsimd_state), FP_REGS,
607 aarch64_fill_fpregset, aarch64_store_fpregset
608 },
609 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
610 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
611 NULL, aarch64_store_pauthregset },
612 NULL_REGSET
613 };
614
615 static struct regsets_info aarch64_regsets_info =
616 {
617 aarch64_regsets, /* regsets */
618 0, /* num_regsets */
619 NULL, /* disabled_regsets */
620 };
621
622 static struct regs_info regs_info_aarch64 =
623 {
624 NULL, /* regset_bitmap */
625 NULL, /* usrregs */
626 &aarch64_regsets_info,
627 };
628
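/* Register sets used for AArch64 targets with SVE state; the full
   vector state is accessed through the NT_ARM_SVE regset. */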
629 static struct regset_info aarch64_sve_regsets[] =
630 {
631 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
632 sizeof (struct user_pt_regs), GENERAL_REGS,
633 aarch64_fill_gregset, aarch64_store_gregset },
634 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
635 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
636 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
637 },
638 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
639 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
640 NULL, aarch64_store_pauthregset },
641 NULL_REGSET
642 };
643
644 static struct regsets_info aarch64_sve_regsets_info =
645 {
646 aarch64_sve_regsets, /* regsets. */
647 0, /* num_regsets. */
648 NULL, /* disabled_regsets. */
649 };
650
651 static struct regs_info regs_info_aarch64_sve =
652 {
653 NULL, /* regset_bitmap. */
654 NULL, /* usrregs. */
655 &aarch64_sve_regsets_info,
656 };
657
658 /* Implementation of linux target ops method "get_regs_info". */
659
660 const regs_info *
661 aarch64_target::get_regs_info ()
662 {
663 if (!is_64bit_tdesc ())
664 return &regs_info_aarch32;
665
666 if (is_sve_tdesc ())
667 return &regs_info_aarch64_sve;
668
669 return &regs_info_aarch64;
670 }
671
672 /* Implementation of linux_target_ops method "supports_tracepoints". */
673
674 static int
675 aarch64_supports_tracepoints (void)
676 {
677 if (current_thread == NULL)
678 return 1;
679 else
680 {
681 /* We don't support tracepoints on aarch32 now. */
682 return is_64bit_tdesc ();
683 }
684 }
685
686 /* Implementation of linux_target_ops method "get_thread_area". */
687
688 static int
689 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
690 {
691 struct iovec iovec;
692 uint64_t reg;
693
694 iovec.iov_base = &reg;
695 iovec.iov_len = sizeof (reg);
696
697 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
698 return -1;
699
700 *addrp = reg;
701
702 return 0;
703 }
704
705 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
706
707 static void
708 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
709 {
710 int use_64bit = register_size (regcache->tdesc, 0) == 8;
711
712 if (use_64bit)
713 {
714 long l_sysno;
715
716 collect_register_by_name (regcache, "x8", &l_sysno);
717 *sysno = (int) l_sysno;
718 }
719 else
720 collect_register_by_name (regcache, "r7", sysno);
721 }
722
723 /* List of condition codes that we need. */
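/* For example, these are passed as the COND argument of the emitters
   below; "p += emit_cset (p, x0, EQ)" sets x0 to 1 when the Z flag is
   set and to 0 otherwise. */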
724
725 enum aarch64_condition_codes
726 {
727 EQ = 0x0,
728 NE = 0x1,
729 LO = 0x3,
730 GE = 0xa,
731 LT = 0xb,
732 GT = 0xc,
733 LE = 0xd,
734 };
735
736 enum aarch64_operand_type
737 {
738 OPERAND_IMMEDIATE,
739 OPERAND_REGISTER,
740 };
741
742 /* Representation of an operand. At this time, it only supports register
743 and immediate types. */
744
745 struct aarch64_operand
746 {
747 /* Type of the operand. */
748 enum aarch64_operand_type type;
749
750 /* Value of the operand according to the type. */
751 union
752 {
753 uint32_t imm;
754 struct aarch64_register reg;
755 };
756 };
757
758 /* List of registers that we are currently using; we can add more here
759 as we need them. */
760
761 /* General purpose scratch registers (64 bit). */
762 static const struct aarch64_register x0 = { 0, 1 };
763 static const struct aarch64_register x1 = { 1, 1 };
764 static const struct aarch64_register x2 = { 2, 1 };
765 static const struct aarch64_register x3 = { 3, 1 };
766 static const struct aarch64_register x4 = { 4, 1 };
767
768 /* General purpose scratch registers (32 bit). */
769 static const struct aarch64_register w0 = { 0, 0 };
770 static const struct aarch64_register w2 = { 2, 0 };
771
772 /* Intra-procedure scratch registers. */
773 static const struct aarch64_register ip0 = { 16, 1 };
774
775 /* Special purpose registers. */
776 static const struct aarch64_register fp = { 29, 1 };
777 static const struct aarch64_register lr = { 30, 1 };
778 static const struct aarch64_register sp = { 31, 1 };
779 static const struct aarch64_register xzr = { 31, 1 };
780
781 /* Construct a register at run time. If we know the register
782 statically, we should make it a global as above instead of using this
783 helper function. */
784
785 static struct aarch64_register
786 aarch64_register (unsigned num, int is64)
787 {
788 return (struct aarch64_register) { num, is64 };
789 }
790
791 /* Helper function to create a register operand, for instructions with
792 different types of operands.
793
794 For example:
795 p += emit_mov (p, x0, register_operand (x1)); */
796
797 static struct aarch64_operand
798 register_operand (struct aarch64_register reg)
799 {
800 struct aarch64_operand operand;
801
802 operand.type = OPERAND_REGISTER;
803 operand.reg = reg;
804
805 return operand;
806 }
807
808 /* Helper function to create an immediate operand, for instructions with
809 different types of operands.
810
811 For example:
812 p += emit_mov (p, x0, immediate_operand (12)); */
813
814 static struct aarch64_operand
815 immediate_operand (uint32_t imm)
816 {
817 struct aarch64_operand operand;
818
819 operand.type = OPERAND_IMMEDIATE;
820 operand.imm = imm;
821
822 return operand;
823 }
824
825 /* Helper function to create an offset memory operand.
826
827 For example:
828 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
829
830 static struct aarch64_memory_operand
831 offset_memory_operand (int32_t offset)
832 {
833 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
834 }
835
836 /* Helper function to create a pre-index memory operand.
837
838 For example:
839 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
840
841 static struct aarch64_memory_operand
842 preindex_memory_operand (int32_t index)
843 {
844 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
845 }
846
847 /* Helper function to create a post-index memory operand.
848
849 For example:
850 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
851
852 static struct aarch64_memory_operand
853 postindex_memory_operand (int32_t index)
854 {
855 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
856 }
857
858 /* System control registers. These special registers can be written and
859 read with the MRS and MSR instructions.
860
861 - NZCV: Condition flags. GDB refers to this register under the CPSR
862 name.
863 - FPSR: Floating-point status register.
864 - FPCR: Floating-point control register.
865 - TPIDR_EL0: Software thread ID register. */
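/* For example, the condition flags can be read into and written back
   from a general purpose register with the emit_mrs/emit_msr helpers
   defined below:

     p += emit_mrs (p, x0, NZCV);
     p += emit_msr (p, NZCV, x0);  */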
866
867 enum aarch64_system_control_registers
868 {
869 /* op0 op1 crn crm op2 */
870 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
871 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
872 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
873 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
874 };
875
876 /* Write a BLR instruction into *BUF.
877
878 BLR rn
879
880 RN is the register to branch to. */
881
882 static int
883 emit_blr (uint32_t *buf, struct aarch64_register rn)
884 {
885 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
886 }
887
888 /* Write a RET instruction into *BUF.
889
890 RET xn
891
892 RN is the register to branch to. */
893
894 static int
895 emit_ret (uint32_t *buf, struct aarch64_register rn)
896 {
897 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
898 }
899
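/* Helper function emitting a load or store pair instruction (used by
   the STP and LDP emitters below), with OPERAND selecting the offset,
   pre-index or post-index addressing mode. */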
900 static int
901 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
902 struct aarch64_register rt,
903 struct aarch64_register rt2,
904 struct aarch64_register rn,
905 struct aarch64_memory_operand operand)
906 {
907 uint32_t opc;
908 uint32_t pre_index;
909 uint32_t write_back;
910
911 if (rt.is64)
912 opc = ENCODE (2, 2, 30);
913 else
914 opc = ENCODE (0, 2, 30);
915
916 switch (operand.type)
917 {
918 case MEMORY_OPERAND_OFFSET:
919 {
920 pre_index = ENCODE (1, 1, 24);
921 write_back = ENCODE (0, 1, 23);
922 break;
923 }
924 case MEMORY_OPERAND_POSTINDEX:
925 {
926 pre_index = ENCODE (0, 1, 24);
927 write_back = ENCODE (1, 1, 23);
928 break;
929 }
930 case MEMORY_OPERAND_PREINDEX:
931 {
932 pre_index = ENCODE (1, 1, 24);
933 write_back = ENCODE (1, 1, 23);
934 break;
935 }
936 default:
937 return 0;
938 }
939
940 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
941 | ENCODE (operand.index >> 3, 7, 15)
942 | ENCODE (rt2.num, 5, 10)
943 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
944 }
945
946 /* Write a STP instruction into *BUF.
947
948 STP rt, rt2, [rn, #offset]
949 STP rt, rt2, [rn, #index]!
950 STP rt, rt2, [rn], #index
951
952 RT and RT2 are the registers to store.
953 RN is the base address register.
954 OFFSET is the immediate to add to the base address. It is limited to a
955 -512 .. 504 range (7 bits << 3). */
956
957 static int
958 emit_stp (uint32_t *buf, struct aarch64_register rt,
959 struct aarch64_register rt2, struct aarch64_register rn,
960 struct aarch64_memory_operand operand)
961 {
962 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
963 }
964
965 /* Write a LDP instruction into *BUF.
966
967 LDP rt, rt2, [rn, #offset]
968 LDP rt, rt2, [rn, #index]!
969 LDP rt, rt2, [rn], #index
970
971 RT and RT2 are the registers to load.
972 RN is the base address register.
973 OFFSET is the immediate to add to the base address. It is limited to a
974 -512 .. 504 range (7 bits << 3). */
975
976 static int
977 emit_ldp (uint32_t *buf, struct aarch64_register rt,
978 struct aarch64_register rt2, struct aarch64_register rn,
979 struct aarch64_memory_operand operand)
980 {
981 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
982 }
983
984 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
985
986 LDP qt, qt2, [rn, #offset]
987
988 RT and RT2 are the Q registers to load.
989 RN is the base address register.
990 OFFSET is the immediate to add to the base address. It is limited to
991 -1024 .. 1008 range (7 bits << 4). */
992
993 static int
994 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
995 struct aarch64_register rn, int32_t offset)
996 {
997 uint32_t opc = ENCODE (2, 2, 30);
998 uint32_t pre_index = ENCODE (1, 1, 24);
999
1000 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
1001 | ENCODE (offset >> 4, 7, 15)
1002 | ENCODE (rt2, 5, 10)
1003 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1004 }
1005
1006 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1007
1008 STP qt, qt2, [rn, #offset]
1009
1010 RT and RT2 are the Q registers to store.
1011 RN is the base address register.
1012 OFFSET is the immediate to add to the base address. It is limited to
1013 -1024 .. 1008 range (7 bits << 4). */
1014
1015 static int
1016 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1017 struct aarch64_register rn, int32_t offset)
1018 {
1019 uint32_t opc = ENCODE (2, 2, 30);
1020 uint32_t pre_index = ENCODE (1, 1, 24);
1021
1022 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
1023 | ENCODE (offset >> 4, 7, 15)
1024 | ENCODE (rt2, 5, 10)
1025 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1026 }
1027
1028 /* Write a LDRH instruction into *BUF.
1029
1030 LDRH wt, [xn, #offset]
1031 LDRH wt, [xn, #index]!
1032 LDRH wt, [xn], #index
1033
1034 RT is the register to load.
1035 RN is the base address register.
1036 OFFSET is the immediate to add to the base address. It is limited to
1037 0 .. 32760 range (12 bits << 3). */
1038
1039 static int
1040 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1041 struct aarch64_register rn,
1042 struct aarch64_memory_operand operand)
1043 {
1044 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1045 }
1046
1047 /* Write a LDRB instruction into *BUF.
1048
1049 LDRB wt, [xn, #offset]
1050 LDRB wt, [xn, #index]!
1051 LDRB wt, [xn], #index
1052
1053 RT is the register to load.
1054 RN is the base address register.
1055 OFFSET is the immediate to add to the base address. It is limited to
1056 0 .. 32760 range (12 bits << 3). */
1057
1058 static int
1059 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1060 struct aarch64_register rn,
1061 struct aarch64_memory_operand operand)
1062 {
1063 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1064 }
1065
1066
1067
1068 /* Write a STR instruction into *BUF.
1069
1070 STR rt, [rn, #offset]
1071 STR rt, [rn, #index]!
1072 STR rt, [rn], #index
1073
1074 RT is the register to store.
1075 RN is the base address register.
1076 OFFSET is the immediate to add to the base address. It is limited to
1077 0 .. 32760 range (12 bits << 3). */
1078
1079 static int
1080 emit_str (uint32_t *buf, struct aarch64_register rt,
1081 struct aarch64_register rn,
1082 struct aarch64_memory_operand operand)
1083 {
1084 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1085 }
1086
1087 /* Helper function emitting an exclusive load or store instruction. */
1088
1089 static int
1090 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1091 enum aarch64_opcodes opcode,
1092 struct aarch64_register rs,
1093 struct aarch64_register rt,
1094 struct aarch64_register rt2,
1095 struct aarch64_register rn)
1096 {
1097 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1098 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1099 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1100 }
1101
1102 /* Write a LDAXR instruction into *BUF.
1103
1104 LDAXR rt, [xn]
1105
1106 RT is the destination register.
1107 RN is the base address register. */
1108
1109 static int
1110 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1111 struct aarch64_register rn)
1112 {
1113 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1114 xzr, rn);
1115 }
1116
1117 /* Write a STXR instruction into *BUF.
1118
1119 STXR ws, rt, [xn]
1120
1121 RS is the result register; it indicates whether the store succeeded.
1122 RT is the destination register.
1123 RN is the base address register. */
1124
1125 static int
1126 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1127 struct aarch64_register rt, struct aarch64_register rn)
1128 {
1129 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1130 xzr, rn);
1131 }
1132
1133 /* Write a STLR instruction into *BUF.
1134
1135 STLR rt, [xn]
1136
1137 RT is the register to store.
1138 RN is the base address register. */
1139
1140 static int
1141 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1142 struct aarch64_register rn)
1143 {
1144 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1145 xzr, rn);
1146 }
1147
1148 /* Helper function for data processing instructions with register sources. */
1149
1150 static int
1151 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1152 struct aarch64_register rd,
1153 struct aarch64_register rn,
1154 struct aarch64_register rm)
1155 {
1156 uint32_t size = ENCODE (rd.is64, 1, 31);
1157
1158 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1159 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1160 }
1161
1162 /* Helper function for data processing instructions taking either a register
1163 or an immediate. */
1164
1165 static int
1166 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1167 struct aarch64_register rd,
1168 struct aarch64_register rn,
1169 struct aarch64_operand operand)
1170 {
1171 uint32_t size = ENCODE (rd.is64, 1, 31);
1172 /* The opcode is different for register and immediate source operands. */
1173 uint32_t operand_opcode;
1174
1175 if (operand.type == OPERAND_IMMEDIATE)
1176 {
1177 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1178 operand_opcode = ENCODE (8, 4, 25);
1179
1180 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1181 | ENCODE (operand.imm, 12, 10)
1182 | ENCODE (rn.num, 5, 5)
1183 | ENCODE (rd.num, 5, 0));
1184 }
1185 else
1186 {
1187 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1188 operand_opcode = ENCODE (5, 4, 25);
1189
1190 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1191 rn, operand.reg);
1192 }
1193 }
1194
1195 /* Write an ADD instruction into *BUF.
1196
1197 ADD rd, rn, #imm
1198 ADD rd, rn, rm
1199
1200 This function handles both an immediate and register add.
1201
1202 RD is the destination register.
1203 RN is the input register.
1204 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1205 OPERAND_REGISTER. */
1206
1207 static int
1208 emit_add (uint32_t *buf, struct aarch64_register rd,
1209 struct aarch64_register rn, struct aarch64_operand operand)
1210 {
1211 return emit_data_processing (buf, ADD, rd, rn, operand);
1212 }
1213
1214 /* Write a SUB instruction into *BUF.
1215
1216 SUB rd, rn, #imm
1217 SUB rd, rn, rm
1218
1219 This function handles both an immediate and register sub.
1220
1221 RD is the destination register.
1222 RN is the input register.
1223 OPERAND is the immediate or register to subtract from RN. */
1224
1225 static int
1226 emit_sub (uint32_t *buf, struct aarch64_register rd,
1227 struct aarch64_register rn, struct aarch64_operand operand)
1228 {
1229 return emit_data_processing (buf, SUB, rd, rn, operand);
1230 }
1231
1232 /* Write a MOV instruction into *BUF.
1233
1234 MOV rd, #imm
1235 MOV rd, rm
1236
1237 This function handles both a wide immediate move and a register move,
1238 with the condition that the source register is not xzr. xzr and the
1239 stack pointer share the same encoding and this function only supports
1240 the stack pointer.
1241
1242 RD is the destination register.
1243 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1244 OPERAND_REGISTER. */
1245
1246 static int
1247 emit_mov (uint32_t *buf, struct aarch64_register rd,
1248 struct aarch64_operand operand)
1249 {
1250 if (operand.type == OPERAND_IMMEDIATE)
1251 {
1252 uint32_t size = ENCODE (rd.is64, 1, 31);
1253 /* Do not shift the immediate. */
1254 uint32_t shift = ENCODE (0, 2, 21);
1255
1256 return aarch64_emit_insn (buf, MOV | size | shift
1257 | ENCODE (operand.imm, 16, 5)
1258 | ENCODE (rd.num, 5, 0));
1259 }
1260 else
1261 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1262 }
1263
1264 /* Write a MOVK instruction into *BUF.
1265
1266 MOVK rd, #imm, lsl #shift
1267
1268 RD is the destination register.
1269 IMM is the immediate.
1270 SHIFT is the logical shift left to apply to IMM. */
1271
1272 static int
1273 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1274 unsigned shift)
1275 {
1276 uint32_t size = ENCODE (rd.is64, 1, 31);
1277
1278 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1279 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1280 }
1281
1282 /* Write instructions into *BUF in order to move ADDR into a register.
1283 ADDR can be a 64-bit value.
1284
1285 This function will emit a series of MOV and MOVK instructions, such as:
1286
1287 MOV xd, #(addr)
1288 MOVK xd, #(addr >> 16), lsl #16
1289 MOVK xd, #(addr >> 32), lsl #32
1290 MOVK xd, #(addr >> 48), lsl #48 */
1291
1292 static int
1293 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1294 {
1295 uint32_t *p = buf;
1296
1297 /* The MOV (wide immediate) instruction clears the top bits of the
1298 register. */
1299 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1300
1301 if ((addr >> 16) != 0)
1302 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1303 else
1304 return p - buf;
1305
1306 if ((addr >> 32) != 0)
1307 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1308 else
1309 return p - buf;
1310
1311 if ((addr >> 48) != 0)
1312 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1313
1314 return p - buf;
1315 }
1316
1317 /* Write a SUBS instruction into *BUF.
1318
1319 SUBS rd, rn, rm
1320
1321 This instruction updates the condition flags.
1322
1323 RD is the destination register.
1324 RN and RM are the source registers. */
1325
1326 static int
1327 emit_subs (uint32_t *buf, struct aarch64_register rd,
1328 struct aarch64_register rn, struct aarch64_operand operand)
1329 {
1330 return emit_data_processing (buf, SUBS, rd, rn, operand);
1331 }
1332
1333 /* Write a CMP instruction into *BUF.
1334
1335 CMP rn, rm
1336
1337 This instruction is an alias of SUBS xzr, rn, rm.
1338
1339 RN and RM are the registers to compare. */
1340
1341 static int
1342 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1343 struct aarch64_operand operand)
1344 {
1345 return emit_subs (buf, xzr, rn, operand);
1346 }
1347
1348 /* Write an AND instruction into *BUF.
1349
1350 AND rd, rn, rm
1351
1352 RD is the destination register.
1353 RN and RM are the source registers. */
1354
1355 static int
1356 emit_and (uint32_t *buf, struct aarch64_register rd,
1357 struct aarch64_register rn, struct aarch64_register rm)
1358 {
1359 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1360 }
1361
1362 /* Write an ORR instruction into *BUF.
1363
1364 ORR rd, rn, rm
1365
1366 RD is the destination register.
1367 RN and RM are the source registers. */
1368
1369 static int
1370 emit_orr (uint32_t *buf, struct aarch64_register rd,
1371 struct aarch64_register rn, struct aarch64_register rm)
1372 {
1373 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1374 }
1375
1376 /* Write an ORN instruction into *BUF.
1377
1378 ORN rd, rn, rm
1379
1380 RD is the destination register.
1381 RN and RM are the source registers. */
1382
1383 static int
1384 emit_orn (uint32_t *buf, struct aarch64_register rd,
1385 struct aarch64_register rn, struct aarch64_register rm)
1386 {
1387 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1388 }
1389
1390 /* Write an EOR instruction into *BUF.
1391
1392 EOR rd, rn, rm
1393
1394 RD is the destination register.
1395 RN and RM are the source registers. */
1396
1397 static int
1398 emit_eor (uint32_t *buf, struct aarch64_register rd,
1399 struct aarch64_register rn, struct aarch64_register rm)
1400 {
1401 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1402 }
1403
1404 /* Write a MVN instruction into *BUF.
1405
1406 MVN rd, rm
1407
1408 This is an alias for ORN rd, xzr, rm.
1409
1410 RD is the destination register.
1411 RM is the source register. */
1412
1413 static int
1414 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1415 struct aarch64_register rm)
1416 {
1417 return emit_orn (buf, rd, xzr, rm);
1418 }
1419
1420 /* Write a LSLV instruction into *BUF.
1421
1422 LSLV rd, rn, rm
1423
1424 RD is the destination register.
1425 RN and RM are the source registers. */
1426
1427 static int
1428 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1429 struct aarch64_register rn, struct aarch64_register rm)
1430 {
1431 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1432 }
1433
1434 /* Write a LSRV instruction into *BUF.
1435
1436 LSRV rd, rn, rm
1437
1438 RD is the destination register.
1439 RN and RM are the source registers. */
1440
1441 static int
1442 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1443 struct aarch64_register rn, struct aarch64_register rm)
1444 {
1445 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1446 }
1447
1448 /* Write an ASRV instruction into *BUF.
1449
1450 ASRV rd, rn, rm
1451
1452 RD is the destination register.
1453 RN and RM are the source registers. */
1454
1455 static int
1456 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1457 struct aarch64_register rn, struct aarch64_register rm)
1458 {
1459 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1460 }
1461
1462 /* Write a MUL instruction into *BUF.
1463
1464 MUL rd, rn, rm
1465
1466 RD is the destination register.
1467 RN and RM are the source registers. */
1468
1469 static int
1470 emit_mul (uint32_t *buf, struct aarch64_register rd,
1471 struct aarch64_register rn, struct aarch64_register rm)
1472 {
1473 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1474 }
1475
1476 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1477
1478 MRS xt, system_reg
1479
1480 RT is the destination register.
1481 SYSTEM_REG is the special purpose register to read. */
1482
1483 static int
1484 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1485 enum aarch64_system_control_registers system_reg)
1486 {
1487 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1488 | ENCODE (rt.num, 5, 0));
1489 }
1490
1491 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1492
1493 MSR system_reg, xt
1494
1495 SYSTEM_REG is the special purpose register to write.
1496 RT is the input register. */
1497
1498 static int
1499 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1500 struct aarch64_register rt)
1501 {
1502 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1503 | ENCODE (rt.num, 5, 0));
1504 }
1505
1506 /* Write a SEVL instruction into *BUF.
1507
1508 This is a hint instruction telling the hardware to trigger an event. */
1509
1510 static int
1511 emit_sevl (uint32_t *buf)
1512 {
1513 return aarch64_emit_insn (buf, SEVL);
1514 }
1515
1516 /* Write a WFE instruction into *BUF.
1517
1518 This is a hint instruction telling the hardware to wait for an event. */
1519
1520 static int
1521 emit_wfe (uint32_t *buf)
1522 {
1523 return aarch64_emit_insn (buf, WFE);
1524 }
1525
1526 /* Write a SBFM instruction into *BUF.
1527
1528 SBFM rd, rn, #immr, #imms
1529
1530 This instruction moves the bits from #immr to #imms into the
1531 destination, sign extending the result.
1532
1533 RD is the destination register.
1534 RN is the source register.
1535 IMMR is the bit number to start at (least significant bit).
1536 IMMS is the bit number to stop at (most significant bit). */
1537
1538 static int
1539 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1540 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1541 {
1542 uint32_t size = ENCODE (rd.is64, 1, 31);
1543 uint32_t n = ENCODE (rd.is64, 1, 22);
1544
1545 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1546 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1547 | ENCODE (rd.num, 5, 0));
1548 }
1549
1550 /* Write a SBFX instruction into *BUF.
1551
1552 SBFX rd, rn, #lsb, #width
1553
1554 This instruction moves #width bits from #lsb into the destination, sign
1555 extending the result. This is an alias for:
1556
1557 SBFM rd, rn, #lsb, #(lsb + width - 1)
1558
1559 RD is the destination register.
1560 RN is the source register.
1561 LSB is the bit number to start at (least significant bit).
1562 WIDTH is the number of bits to move. */
1563
1564 static int
1565 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1566 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1567 {
1568 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1569 }
1570
1571 /* Write a UBFM instruction into *BUF.
1572
1573 UBFM rd, rn, #immr, #imms
1574
1575 This instruction moves the bits from #immr to #imms into the
1576 destination, extending the result with zeros.
1577
1578 RD is the destination register.
1579 RN is the source register.
1580 IMMR is the bit number to start at (least significant bit).
1581 IMMS is the bit number to stop at (most significant bit). */
1582
1583 static int
1584 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1585 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1586 {
1587 uint32_t size = ENCODE (rd.is64, 1, 31);
1588 uint32_t n = ENCODE (rd.is64, 1, 22);
1589
1590 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1591 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1592 | ENCODE (rd.num, 5, 0));
1593 }
1594
1595 /* Write a UBFX instruction into *BUF.
1596
1597 UBFX rd, rn, #lsb, #width
1598
1599 This instruction moves #width bits from #lsb into the destination,
1600 extending the result with zeros. This is an alias for:
1601
1602 UBFM rd, rn, #lsb, #(lsb + width - 1)
1603
1604 RD is the destination register.
1605 RN is the source register.
1606 LSB is the bit number to start at (least significant bit).
1607 WIDTH is the number of bits to move. */
1608
1609 static int
1610 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1611 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1612 {
1613 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1614 }
1615
1616 /* Write a CSINC instruction into *BUF.
1617
1618 CSINC rd, rn, rm, cond
1619
1620 This instruction conditionally increments rn or rm and places the result
1621 in rd. rn is chosen if the condition is true.
1622
1623 RD is the destination register.
1624 RN and RM are the source registers.
1625 COND is the encoded condition. */
1626
1627 static int
1628 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1629 struct aarch64_register rn, struct aarch64_register rm,
1630 unsigned cond)
1631 {
1632 uint32_t size = ENCODE (rd.is64, 1, 31);
1633
1634 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1635 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1636 | ENCODE (rd.num, 5, 0));
1637 }
1638
1639 /* Write a CSET instruction into *BUF.
1640
1641 CSET rd, cond
1642
1643 This instruction conditionally writes 1 or 0 to the destination register.
1644 1 is written if the condition is true. This is an alias for:
1645
1646 CSINC rd, xzr, xzr, !cond
1647
1648 Note that the condition needs to be inverted.
1649
1650 RD is the destination register.
1651 The XZR register is used for both source operands.
1652 COND is the encoded condition. */
1653
1654 static int
1655 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1656 {
1657 /* The least significant bit of the condition needs toggling in order to
1658 invert it. */
1659 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1660 }
1661
1662 /* Write LEN instructions from BUF into the inferior memory at *TO.
1663
1664 Note instructions are always little endian on AArch64, unlike data. */
1665
1666 static void
1667 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1668 {
1669 size_t byte_len = len * sizeof (uint32_t);
1670 #if (__BYTE_ORDER == __BIG_ENDIAN)
1671 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1672 size_t i;
1673
1674 for (i = 0; i < len; i++)
1675 le_buf[i] = htole32 (buf[i]);
1676
1677 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1678
1679 xfree (le_buf);
1680 #else
1681 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1682 #endif
1683
1684 *to += byte_len;
1685 }
1686
1687 /* Sub-class of struct aarch64_insn_data that stores the information
1688 needed to relocate an instruction for a fast tracepoint. The visitor
1689 can relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1690 the relocated instructions in the buffer pointed to by INSN_PTR. */
1691
1692 struct aarch64_insn_relocation_data
1693 {
1694 struct aarch64_insn_data base;
1695
1696 /* The new address the instruction is relocated to. */
1697 CORE_ADDR new_addr;
1698 /* Pointer to the buffer of relocated instruction(s). */
1699 uint32_t *insn_ptr;
1700 };
1701
1702 /* Implementation of aarch64_insn_visitor method "b". */
1703
1704 static void
1705 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1706 struct aarch64_insn_data *data)
1707 {
1708 struct aarch64_insn_relocation_data *insn_reloc
1709 = (struct aarch64_insn_relocation_data *) data;
1710 int64_t new_offset
1711 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1712
1713 if (can_encode_int32 (new_offset, 28))
1714 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1715 }
1716
1717 /* Implementation of aarch64_insn_visitor method "b_cond". */
1718
1719 static void
1720 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1721 struct aarch64_insn_data *data)
1722 {
1723 struct aarch64_insn_relocation_data *insn_reloc
1724 = (struct aarch64_insn_relocation_data *) data;
1725 int64_t new_offset
1726 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1727
1728 if (can_encode_int32 (new_offset, 21))
1729 {
1730 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1731 new_offset);
1732 }
1733 else if (can_encode_int32 (new_offset, 28))
1734 {
1735 /* The offset is out of range for a conditional branch
1736 instruction but not for an unconditional branch. We can use
1737 the following instructions instead:
1738
1739 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1740 B NOT_TAKEN ; Else jump over TAKEN and continue.
1741 TAKEN:
1742 B #(offset - 8)
1743 NOT_TAKEN:
1744
1745 */
1746
1747 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1748 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1749 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1750 }
1751 }
1752
1753 /* Implementation of aarch64_insn_visitor method "cb". */
1754
1755 static void
1756 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1757 const unsigned rn, int is64,
1758 struct aarch64_insn_data *data)
1759 {
1760 struct aarch64_insn_relocation_data *insn_reloc
1761 = (struct aarch64_insn_relocation_data *) data;
1762 int64_t new_offset
1763 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1764
1765 if (can_encode_int32 (new_offset, 21))
1766 {
1767 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1768 aarch64_register (rn, is64), new_offset);
1769 }
1770 else if (can_encode_int32 (new_offset, 28))
1771 {
1772 /* The offset is out of range for a compare and branch
1773 instruction but not for an unconditional branch. We can use
1774 the following instructions instead:
1775
1776 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1777 B NOT_TAKEN ; Else jump over TAKEN and continue.
1778 TAKEN:
1779 B #(offset - 8)
1780 NOT_TAKEN:
1781
1782 */
1783 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1784 aarch64_register (rn, is64), 8);
1785 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1786 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1787 }
1788 }
1789
1790 /* Implementation of aarch64_insn_visitor method "tb". */
1791
1792 static void
1793 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1794 const unsigned rt, unsigned bit,
1795 struct aarch64_insn_data *data)
1796 {
1797 struct aarch64_insn_relocation_data *insn_reloc
1798 = (struct aarch64_insn_relocation_data *) data;
1799 int64_t new_offset
1800 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1801
1802 if (can_encode_int32 (new_offset, 16))
1803 {
1804 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1805 aarch64_register (rt, 1), new_offset);
1806 }
1807 else if (can_encode_int32 (new_offset, 28))
1808 {
1809 /* The offset is out of range for a test bit and branch
1810 instruction but not for an unconditional branch. We can use
1811 the following instructions instead:
1812
1813 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1814 B NOT_TAKEN ; Else jump over TAKEN and continue.
1815 TAKEN:
1816 B #(offset - 8)
1817 NOT_TAKEN:
1818
1819 */
1820 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1821 aarch64_register (rt, 1), 8);
1822 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1823 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1824 new_offset - 8);
1825 }
1826 }
1827
1828 /* Implementation of aarch64_insn_visitor method "adr". */
1829
1830 static void
1831 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1832 const int is_adrp,
1833 struct aarch64_insn_data *data)
1834 {
1835 struct aarch64_insn_relocation_data *insn_reloc
1836 = (struct aarch64_insn_relocation_data *) data;
1837 /* We know exactly the address the ADR{P,} instruction will compute.
1838 We can just write it to the destination register. */
1839 CORE_ADDR address = data->insn_addr + offset;
1840
1841 if (is_adrp)
1842 {
1843 /* Clear the lower 12 bits of the address to get its 4K page. */
1844 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1845 aarch64_register (rd, 1),
1846 address & ~0xfff);
1847 }
1848 else
1849 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1850 aarch64_register (rd, 1), address);
1851 }
1852
1853 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1854
1855 static void
1856 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1857 const unsigned rt, const int is64,
1858 struct aarch64_insn_data *data)
1859 {
1860 struct aarch64_insn_relocation_data *insn_reloc
1861 = (struct aarch64_insn_relocation_data *) data;
1862 CORE_ADDR address = data->insn_addr + offset;
1863
1864 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1865 aarch64_register (rt, 1), address);
1866
1867 /* We know exactly what address to load from, and what register we
1868 can use:
1869
1870 MOV xd, #(oldloc + offset)
1871 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1872 ...
1873
1874 LDR xd, [xd] ; or LDRSW xd, [xd]
1875
1876 */
1877
1878 if (is_sw)
1879 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1880 aarch64_register (rt, 1),
1881 aarch64_register (rt, 1),
1882 offset_memory_operand (0));
1883 else
1884 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1885 aarch64_register (rt, is64),
1886 aarch64_register (rt, 1),
1887 offset_memory_operand (0));
1888 }
1889
1890 /* Implementation of aarch64_insn_visitor method "others". */
1891
1892 static void
1893 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1894 struct aarch64_insn_data *data)
1895 {
1896 struct aarch64_insn_relocation_data *insn_reloc
1897 = (struct aarch64_insn_relocation_data *) data;
1898
1899 /* The instruction is not PC relative. Just re-emit it at the new
1900 location. */
1901 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1902 }
1903
1904 static const struct aarch64_insn_visitor visitor =
1905 {
1906 aarch64_ftrace_insn_reloc_b,
1907 aarch64_ftrace_insn_reloc_b_cond,
1908 aarch64_ftrace_insn_reloc_cb,
1909 aarch64_ftrace_insn_reloc_tb,
1910 aarch64_ftrace_insn_reloc_adr,
1911 aarch64_ftrace_insn_reloc_ldr_literal,
1912 aarch64_ftrace_insn_reloc_others,
1913 };
1914
1915 /* Implementation of linux_target_ops method
1916 "install_fast_tracepoint_jump_pad". */
1917
1918 static int
1919 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1920 CORE_ADDR tpaddr,
1921 CORE_ADDR collector,
1922 CORE_ADDR lockaddr,
1923 ULONGEST orig_size,
1924 CORE_ADDR *jump_entry,
1925 CORE_ADDR *trampoline,
1926 ULONGEST *trampoline_size,
1927 unsigned char *jjump_pad_insn,
1928 ULONGEST *jjump_pad_insn_size,
1929 CORE_ADDR *adjusted_insn_addr,
1930 CORE_ADDR *adjusted_insn_addr_end,
1931 char *err)
1932 {
1933 uint32_t buf[256];
1934 uint32_t *p = buf;
1935 int64_t offset;
1936 int i;
1937 uint32_t insn;
1938 CORE_ADDR buildaddr = *jump_entry;
1939 struct aarch64_insn_relocation_data insn_data;
1940
1941 /* We need to save the current state on the stack both to restore it
1942 later and to collect register values when the tracepoint is hit.
1943
1944 The saved registers are pushed in a layout that needs to be in sync
1945 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1946 the supply_fast_tracepoint_registers function will fill in the
1947 register cache from a pointer to saved registers on the stack we build
1948 here.
1949
1950 For simplicity, we set the size of each cell on the stack to 16 bytes.
1951 This way one cell can hold any register type, from system registers
1952 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1953 has to be 16-byte aligned anyway.
1954
1955 Note that the CPSR register does not exist on AArch64. Instead we
1956 can access system bits describing the process state with the
1957 MRS/MSR instructions, namely the condition flags. We save them as
1958 if they are part of a CPSR register because that's how GDB
1959 interprets these system bits. At the moment, only the condition
1960 flags are saved in CPSR (NZCV).
1961
1962 Stack layout, each cell is 16 bytes (descending):
1963
1964 High *-------- SIMD&FP registers from 31 down to 0. --------*
1965 | q31 |
1966 . .
1967 . . 32 cells
1968 . .
1969 | q0 |
1970 *---- General purpose registers from 30 down to 0. ----*
1971 | x30 |
1972 . .
1973 . . 31 cells
1974 . .
1975 | x0 |
1976 *------------- Special purpose registers. -------------*
1977 | SP |
1978 | PC |
1979 | CPSR (NZCV) | 5 cells
1980 | FPSR |
1981 | FPCR | <- SP + 16
1982 *------------- collecting_t object --------------------*
1983 | TPIDR_EL0 | struct tracepoint * |
1984 Low *------------------------------------------------------*
1985
1986 After this stack is set up, we issue a call to the collector, passing
1987 it the saved registers at (SP + 16). */
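  /* In total the code below reserves (32 + 31 + 5 + 1) * 16 = 1104
     bytes of stack: 32 cells for the SIMD&FP registers, 31 for the
     general purpose registers, 5 for the special purpose registers
     and one for the collecting_t object, which leaves the saved
     register block at SP + 16 once everything has been pushed.  */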
1988
1989 /* Push SIMD&FP registers on the stack:
1990
1991 SUB sp, sp, #(32 * 16)
1992
1993 STP q30, q31, [sp, #(30 * 16)]
1994 ...
1995 STP q0, q1, [sp]
1996
1997 */
1998 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1999 for (i = 30; i >= 0; i -= 2)
2000 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2001
2002 /* Push general purpose registers on the stack. Note that we do not need
2003 to push x31 as it represents the xzr register and not the stack
2004 pointer in a STR instruction.
2005
2006 SUB sp, sp, #(31 * 16)
2007
2008 STR x30, [sp, #(30 * 16)]
2009 ...
2010 STR x0, [sp]
2011
2012 */
2013 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2014 for (i = 30; i >= 0; i -= 1)
2015 p += emit_str (p, aarch64_register (i, 1), sp,
2016 offset_memory_operand (i * 16));
2017
2018 /* Make space for 5 more cells.
2019
2020 SUB sp, sp, #(5 * 16)
2021
2022 */
2023 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2024
2025
2026 /* Save SP:
2027
2028 ADD x4, sp, #((32 + 31 + 5) * 16)
2029 STR x4, [sp, #(4 * 16)]
2030
2031 */
2032 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2033 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2034
2035 /* Save PC (tracepoint address):
2036
2037 MOV x3, #(tpaddr)
2038 ...
2039
2040 STR x3, [sp, #(3 * 16)]
2041
2042 */
2043
2044 p += emit_mov_addr (p, x3, tpaddr);
2045 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2046
2047 /* Save CPSR (NZCV), FPSR and FPCR:
2048
2049 MRS x2, nzcv
2050 MRS x1, fpsr
2051 MRS x0, fpcr
2052
2053 STR x2, [sp, #(2 * 16)]
2054 STR x1, [sp, #(1 * 16)]
2055 STR x0, [sp, #(0 * 16)]
2056
2057 */
2058 p += emit_mrs (p, x2, NZCV);
2059 p += emit_mrs (p, x1, FPSR);
2060 p += emit_mrs (p, x0, FPCR);
2061 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2062 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2063 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2064
2065 /* Push the collecting_t object. It consists of the address of the
2066 tracepoint and an ID for the current thread. We get the latter by
2067 reading the tpidr_el0 system register. It corresponds to the
2068 NT_ARM_TLS register accessible with ptrace.
2069
2070 MOV x0, #(tpoint)
2071 ...
2072
2073 MRS x1, tpidr_el0
2074
2075 STP x0, x1, [sp, #-16]!
2076
2077 */
2078
2079 p += emit_mov_addr (p, x0, tpoint);
2080 p += emit_mrs (p, x1, TPIDR_EL0);
2081 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2082
2083 /* Spin-lock:
2084
2085 The shared memory for the lock is at lockaddr. It will hold zero
2086 if no-one is holding the lock, otherwise it contains the address of
2087 the collecting_t object on the stack of the thread which acquired it.
2088
2089 At this stage, the stack pointer points to this thread's collecting_t
2090 object.
2091
2092 We use the following registers:
2093 - x0: Address of the lock.
2094 - x1: Pointer to collecting_t object.
2095 - x2: Scratch register.
2096
2097 MOV x0, #(lockaddr)
2098 ...
2099 MOV x1, sp
2100
2101 ; Trigger an event local to this core, so that the first WFE
2102 ; below returns immediately instead of waiting.
2103 SEVL
2104 again:
2105 ; Wait for an event. The event is triggered by either the SEVL
2106 ; or STLR instructions (store release).
2107 WFE
2108
2109 ; Atomically read at lockaddr. This marks the memory location as
2110 ; exclusive. This instruction also has acquire semantics: memory
2111 ; accesses after it cannot be reordered to before it, so the
2112 ; critical section effectively starts here.
2113 LDAXR x2, [x0]
2114
2115 ; Try again if another thread holds the lock.
2116 CBNZ x2, again
2117
2118 ; We can lock it! Write the address of the collecting_t object.
2119 ; This instruction will fail if the memory location is not marked
2120 ; as exclusive anymore. If it succeeds, it will remove the
2121 ; exclusive mark on the memory location. This way, if another
2122 ; thread executes this instruction before us, we will fail and try
2123 ; all over again.
2124 STXR w2, x1, [x0]
2125 CBNZ w2, again
2126
2127 */
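  /* In rough C-like pseudo code (an illustration only; names such as
     load_acquire_exclusive and store_exclusive are made up, the real
     code below drives the exclusive monitor directly):

       sevl ();                  // arm the local event register
       do
         {
           wfe ();               // sleep until an event arrives
           owner = load_acquire_exclusive (lockaddr);
         }
       while (owner != 0
              || store_exclusive (lockaddr, &collecting) != 0);
  */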
2128
2129 p += emit_mov_addr (p, x0, lockaddr);
2130 p += emit_mov (p, x1, register_operand (sp));
2131
2132 p += emit_sevl (p);
2133 p += emit_wfe (p);
2134 p += emit_ldaxr (p, x2, x0);
2135 p += emit_cb (p, 1, w2, -2 * 4);
2136 p += emit_stxr (p, w2, x1, x0);
2137 p += emit_cb (p, 1, x2, -4 * 4);
2138
2139 /* Call collector (struct tracepoint *, unsigned char *):
2140
2141 MOV x0, #(tpoint)
2142 ...
2143
2144 ; Saved registers start after the collecting_t object.
2145 ADD x1, sp, #16
2146
2147 ; We use an intra-procedure-call scratch register.
2148 MOV ip0, #(collector)
2149 ...
2150
2151 ; And call back to C!
2152 BLR ip0
2153
2154 */
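  /* Per the AAPCS64 the first two arguments are passed in x0 and x1,
     and ip0 (x16) is an intra-procedure-call scratch register that a
     callee may clobber, so it is a safe place to hold the call
     target.  */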
2155
2156 p += emit_mov_addr (p, x0, tpoint);
2157 p += emit_add (p, x1, sp, immediate_operand (16));
2158
2159 p += emit_mov_addr (p, ip0, collector);
2160 p += emit_blr (p, ip0);
2161
2162 /* Release the lock.
2163
2164 MOV x0, #(lockaddr)
2165 ...
2166
2167 ; This instruction is a normal store with memory ordering
2168 ; constraints. Thanks to this we do not have to put a data
2169 ; barrier instruction to make sure all data reads and writes are done
2170 ; before this instruction is executed. Furthermore, this instruction
2171 ; will trigger an event, letting other threads know they can grab
2172 ; the lock.
2173 STLR xzr, [x0]
2174
2175 */
2176 p += emit_mov_addr (p, x0, lockaddr);
2177 p += emit_stlr (p, xzr, x0);
2178
2179 /* Free collecting_t object:
2180
2181 ADD sp, sp, #16
2182
2183 */
2184 p += emit_add (p, sp, sp, immediate_operand (16));
2185
2186 /* Restore CPSR (NZCV), FPSR and FPCR, then free the stack space
2187 used by the special purpose registers.
2188
2189 LDR x2, [sp, #(2 * 16)]
2190 LDR x1, [sp, #(1 * 16)]
2191 LDR x0, [sp, #(0 * 16)]
2192
2193 MSR NZCV, x2
2194 MSR FPSR, x1
2195 MSR FPCR, x0
2196
2197 ADD sp, sp, #(5 * 16)
2198
2199 */
2200 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2201 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2202 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2203 p += emit_msr (p, NZCV, x2);
2204 p += emit_msr (p, FPSR, x1);
2205 p += emit_msr (p, FPCR, x0);
2206
2207 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2208
2209 /* Pop general purpose registers:
2210
2211 LDR x0, [sp]
2212 ...
2213 LDR x30, [sp, #(30 * 16)]
2214
2215 ADD sp, sp, #(31 * 16)
2216
2217 */
2218 for (i = 0; i <= 30; i += 1)
2219 p += emit_ldr (p, aarch64_register (i, 1), sp,
2220 offset_memory_operand (i * 16));
2221 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2222
2223 /* Pop SIMD&FP registers:
2224
2225 LDP q0, q1, [sp]
2226 ...
2227 LDP q30, q31, [sp, #(30 * 16)]
2228
2229 ADD sp, sp, #(32 * 16)
2230
2231 */
2232 for (i = 0; i <= 30; i += 2)
2233 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2234 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2235
2236 /* Write the code into the inferior memory. */
2237 append_insns (&buildaddr, p - buf, buf);
2238
2239 /* Now emit the relocated instruction. */
2240 *adjusted_insn_addr = buildaddr;
2241 target_read_uint32 (tpaddr, &insn);
2242
2243 insn_data.base.insn_addr = tpaddr;
2244 insn_data.new_addr = buildaddr;
2245 insn_data.insn_ptr = buf;
2246
2247 aarch64_relocate_instruction (insn, &visitor,
2248 (struct aarch64_insn_data *) &insn_data);
2249
2250 /* We may not have been able to relocate the instruction. */
2251 if (insn_data.insn_ptr == buf)
2252 {
2253 sprintf (err,
2254 "E.Could not relocate instruction from %s to %s.",
2255 core_addr_to_string_nz (tpaddr),
2256 core_addr_to_string_nz (buildaddr));
2257 return 1;
2258 }
2259 else
2260 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2261 *adjusted_insn_addr_end = buildaddr;
2262
2263 /* Go back to the start of the buffer. */
2264 p = buf;
2265
2266 /* Emit a branch back from the jump pad. */
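  /* A B instruction encodes a signed 28-bit byte offset, so the jump
     pad and the tracepoint must be within 128 MiB of each other.  */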
2267 offset = (tpaddr + orig_size - buildaddr);
2268 if (!can_encode_int32 (offset, 28))
2269 {
2270 sprintf (err,
2271 "E.Jump back from jump pad too far from tracepoint "
2272 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2273 offset);
2274 return 1;
2275 }
2276
2277 p += emit_b (p, 0, offset);
2278 append_insns (&buildaddr, p - buf, buf);
2279
2280 /* Give the caller a branch instruction into the jump pad. */
2281 offset = (*jump_entry - tpaddr);
2282 if (!can_encode_int32 (offset, 28))
2283 {
2284 sprintf (err,
2285 "E.Jump pad too far from tracepoint "
2286 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2287 offset);
2288 return 1;
2289 }
2290
2291 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2292 *jjump_pad_insn_size = 4;
2293
2294 /* Return the end address of our pad. */
2295 *jump_entry = buildaddr;
2296
2297 return 0;
2298 }
2299
2300 /* Helper function writing LEN instructions from START into
2301 current_insn_ptr. */
2302
2303 static void
2304 emit_ops_insns (const uint32_t *start, int len)
2305 {
2306 CORE_ADDR buildaddr = current_insn_ptr;
2307
2308 if (debug_threads)
2309 debug_printf ("Adding %d instrucions at %s\n",
2310 len, paddress (buildaddr));
2311
2312 append_insns (&buildaddr, len, start);
2313 current_insn_ptr = buildaddr;
2314 }
2315
2316 /* Pop a register from the stack. */
2317
2318 static int
2319 emit_pop (uint32_t *buf, struct aarch64_register rt)
2320 {
2321 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2322 }
2323
2324 /* Push a register on the stack. */
2325
2326 static int
2327 emit_push (uint32_t *buf, struct aarch64_register rt)
2328 {
2329 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2330 }
2331
2332 /* Implementation of emit_ops method "emit_prologue". */
2333
2334 static void
2335 aarch64_emit_prologue (void)
2336 {
2337 uint32_t buf[16];
2338 uint32_t *p = buf;
2339
2340 /* This function emits a prologue for the following function prototype:
2341
2342 enum eval_result_type f (unsigned char *regs,
2343 ULONGEST *value);
2344
2345 The first argument is a buffer of raw registers. The second
2346 argument is a pointer through which the result of evaluating the
2347 expression is returned; it will be set to whatever is on top of
2348 the stack at the end.
2349
2350 The stack set up by the prologue is as follows:
2351
2352 High *------------------------------------------------------*
2353 | LR |
2354 | FP | <- FP
2355 | x1 (ULONGEST *value) |
2356 | x0 (unsigned char *regs) |
2357 Low *------------------------------------------------------*
2358
2359 As we are implementing a stack machine, each opcode can expand the
2360 stack so we never know how far we are from the data saved by this
2361 prologue. In order to be able to refer to value and regs later, we save
2362 the current stack pointer in the frame pointer. This way, it is not
2363 clobbered when calling C functions.
2364
2365 Finally, throughout every operation, we are using register x0 as the
2366 top of the stack, and x1 as a scratch register. */
2367
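  /* In assembly, the sequence emitted below is roughly:

       STP x0, x1, [sp, #-32]!
       STR x30, [sp, #24]
       STR x29, [sp, #16]
       ADD x29, sp, #16
  */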
2368 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2369 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2370 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2371
2372 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2373
2374
2375 emit_ops_insns (buf, p - buf);
2376 }
2377
2378 /* Implementation of emit_ops method "emit_epilogue". */
2379
2380 static void
2381 aarch64_emit_epilogue (void)
2382 {
2383 uint32_t buf[16];
2384 uint32_t *p = buf;
2385
2386 /* Store the result of the expression (x0) in *value. */
2387 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2388 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2389 p += emit_str (p, x0, x1, offset_memory_operand (0));
2390
2391 /* Restore the previous state. */
2392 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2393 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2394
2395 /* Return expr_eval_no_error. */
2396 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2397 p += emit_ret (p, lr);
2398
2399 emit_ops_insns (buf, p - buf);
2400 }
2401
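/* In the stack machine implemented by the emit_ops methods below, x0
   always caches the top of the expression stack.  Each binary
   operation therefore pops its second operand into x1, combines the
   two and leaves the result in x0.  */
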
2402 /* Implementation of emit_ops method "emit_add". */
2403
2404 static void
2405 aarch64_emit_add (void)
2406 {
2407 uint32_t buf[16];
2408 uint32_t *p = buf;
2409
2410 p += emit_pop (p, x1);
2411 p += emit_add (p, x0, x1, register_operand (x0));
2412
2413 emit_ops_insns (buf, p - buf);
2414 }
2415
2416 /* Implementation of emit_ops method "emit_sub". */
2417
2418 static void
2419 aarch64_emit_sub (void)
2420 {
2421 uint32_t buf[16];
2422 uint32_t *p = buf;
2423
2424 p += emit_pop (p, x1);
2425 p += emit_sub (p, x0, x1, register_operand (x0));
2426
2427 emit_ops_insns (buf, p - buf);
2428 }
2429
2430 /* Implementation of emit_ops method "emit_mul". */
2431
2432 static void
2433 aarch64_emit_mul (void)
2434 {
2435 uint32_t buf[16];
2436 uint32_t *p = buf;
2437
2438 p += emit_pop (p, x1);
2439 p += emit_mul (p, x0, x1, x0);
2440
2441 emit_ops_insns (buf, p - buf);
2442 }
2443
2444 /* Implementation of emit_ops method "emit_lsh". */
2445
2446 static void
2447 aarch64_emit_lsh (void)
2448 {
2449 uint32_t buf[16];
2450 uint32_t *p = buf;
2451
2452 p += emit_pop (p, x1);
2453 p += emit_lslv (p, x0, x1, x0);
2454
2455 emit_ops_insns (buf, p - buf);
2456 }
2457
2458 /* Implementation of emit_ops method "emit_rsh_signed". */
2459
2460 static void
2461 aarch64_emit_rsh_signed (void)
2462 {
2463 uint32_t buf[16];
2464 uint32_t *p = buf;
2465
2466 p += emit_pop (p, x1);
2467 p += emit_asrv (p, x0, x1, x0);
2468
2469 emit_ops_insns (buf, p - buf);
2470 }
2471
2472 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2473
2474 static void
2475 aarch64_emit_rsh_unsigned (void)
2476 {
2477 uint32_t buf[16];
2478 uint32_t *p = buf;
2479
2480 p += emit_pop (p, x1);
2481 p += emit_lsrv (p, x0, x1, x0);
2482
2483 emit_ops_insns (buf, p - buf);
2484 }
2485
2486 /* Implementation of emit_ops method "emit_ext". */
2487
2488 static void
2489 aarch64_emit_ext (int arg)
2490 {
2491 uint32_t buf[16];
2492 uint32_t *p = buf;
2493
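  /* Sign-extend the low ARG bits of x0 into the full 64-bit register.  */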
2494 p += emit_sbfx (p, x0, x0, 0, arg);
2495
2496 emit_ops_insns (buf, p - buf);
2497 }
2498
2499 /* Implementation of emit_ops method "emit_log_not". */
2500
2501 static void
2502 aarch64_emit_log_not (void)
2503 {
2504 uint32_t buf[16];
2505 uint32_t *p = buf;
2506
2507 /* If the top of the stack is 0, replace it with 1. Else replace it with
2508 0. */
2509
2510 p += emit_cmp (p, x0, immediate_operand (0));
2511 p += emit_cset (p, x0, EQ);
2512
2513 emit_ops_insns (buf, p - buf);
2514 }
2515
2516 /* Implementation of emit_ops method "emit_bit_and". */
2517
2518 static void
2519 aarch64_emit_bit_and (void)
2520 {
2521 uint32_t buf[16];
2522 uint32_t *p = buf;
2523
2524 p += emit_pop (p, x1);
2525 p += emit_and (p, x0, x0, x1);
2526
2527 emit_ops_insns (buf, p - buf);
2528 }
2529
2530 /* Implementation of emit_ops method "emit_bit_or". */
2531
2532 static void
2533 aarch64_emit_bit_or (void)
2534 {
2535 uint32_t buf[16];
2536 uint32_t *p = buf;
2537
2538 p += emit_pop (p, x1);
2539 p += emit_orr (p, x0, x0, x1);
2540
2541 emit_ops_insns (buf, p - buf);
2542 }
2543
2544 /* Implementation of emit_ops method "emit_bit_xor". */
2545
2546 static void
2547 aarch64_emit_bit_xor (void)
2548 {
2549 uint32_t buf[16];
2550 uint32_t *p = buf;
2551
2552 p += emit_pop (p, x1);
2553 p += emit_eor (p, x0, x0, x1);
2554
2555 emit_ops_insns (buf, p - buf);
2556 }
2557
2558 /* Implementation of emit_ops method "emit_bit_not". */
2559
2560 static void
2561 aarch64_emit_bit_not (void)
2562 {
2563 uint32_t buf[16];
2564 uint32_t *p = buf;
2565
2566 p += emit_mvn (p, x0, x0);
2567
2568 emit_ops_insns (buf, p - buf);
2569 }
2570
2571 /* Implementation of emit_ops method "emit_equal". */
2572
2573 static void
2574 aarch64_emit_equal (void)
2575 {
2576 uint32_t buf[16];
2577 uint32_t *p = buf;
2578
2579 p += emit_pop (p, x1);
2580 p += emit_cmp (p, x0, register_operand (x1));
2581 p += emit_cset (p, x0, EQ);
2582
2583 emit_ops_insns (buf, p - buf);
2584 }
2585
2586 /* Implementation of emit_ops method "emit_less_signed". */
2587
2588 static void
2589 aarch64_emit_less_signed (void)
2590 {
2591 uint32_t buf[16];
2592 uint32_t *p = buf;
2593
2594 p += emit_pop (p, x1);
2595 p += emit_cmp (p, x1, register_operand (x0));
2596 p += emit_cset (p, x0, LT);
2597
2598 emit_ops_insns (buf, p - buf);
2599 }
2600
2601 /* Implementation of emit_ops method "emit_less_unsigned". */
2602
2603 static void
2604 aarch64_emit_less_unsigned (void)
2605 {
2606 uint32_t buf[16];
2607 uint32_t *p = buf;
2608
2609 p += emit_pop (p, x1);
2610 p += emit_cmp (p, x1, register_operand (x0));
2611 p += emit_cset (p, x0, LO);
2612
2613 emit_ops_insns (buf, p - buf);
2614 }
2615
2616 /* Implementation of emit_ops method "emit_ref". */
2617
2618 static void
2619 aarch64_emit_ref (int size)
2620 {
2621 uint32_t buf[16];
2622 uint32_t *p = buf;
2623
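  /* x0 holds the address to dereference; replace it with the value
     read from memory.  */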
2624 switch (size)
2625 {
2626 case 1:
2627 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2628 break;
2629 case 2:
2630 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2631 break;
2632 case 4:
2633 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2634 break;
2635 case 8:
2636 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2637 break;
2638 default:
2639 /* Unknown size, bail on compilation. */
2640 emit_error = 1;
2641 break;
2642 }
2643
2644 emit_ops_insns (buf, p - buf);
2645 }
2646
2647 /* Implementation of emit_ops method "emit_if_goto". */
2648
2649 static void
2650 aarch64_emit_if_goto (int *offset_p, int *size_p)
2651 {
2652 uint32_t buf[16];
2653 uint32_t *p = buf;
2654
2655 /* The Z flag is set or cleared here. */
2656 p += emit_cmp (p, x0, immediate_operand (0));
2657 /* This instruction must not change the Z flag. */
2658 p += emit_pop (p, x0);
2659 /* Branch over the next instruction if x0 == 0. */
2660 p += emit_bcond (p, EQ, 8);
2661
2662 /* The NOP instruction will be patched with an unconditional branch. */
2663 if (offset_p)
2664 *offset_p = (p - buf) * 4;
2665 if (size_p)
2666 *size_p = 4;
2667 p += emit_nop (p);
2668
2669 emit_ops_insns (buf, p - buf);
2670 }
2671
2672 /* Implementation of emit_ops method "emit_goto". */
2673
2674 static void
2675 aarch64_emit_goto (int *offset_p, int *size_p)
2676 {
2677 uint32_t buf[16];
2678 uint32_t *p = buf;
2679
2680 /* The NOP instruction will be patched with an unconditional branch. */
2681 if (offset_p)
2682 *offset_p = 0;
2683 if (size_p)
2684 *size_p = 4;
2685 p += emit_nop (p);
2686
2687 emit_ops_insns (buf, p - buf);
2688 }
2689
2690 /* Implementation of emit_ops method "write_goto_address". */
2691
2692 static void
2693 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2694 {
2695 uint32_t insn;
2696
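  /* Patch the placeholder NOP emitted at FROM with an unconditional
     branch to TO.  */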
2697 emit_b (&insn, 0, to - from);
2698 append_insns (&from, 1, &insn);
2699 }
2700
2701 /* Implementation of emit_ops method "emit_const". */
2702
2703 static void
2704 aarch64_emit_const (LONGEST num)
2705 {
2706 uint32_t buf[16];
2707 uint32_t *p = buf;
2708
2709 p += emit_mov_addr (p, x0, num);
2710
2711 emit_ops_insns (buf, p - buf);
2712 }
2713
2714 /* Implementation of emit_ops method "emit_call". */
2715
2716 static void
2717 aarch64_emit_call (CORE_ADDR fn)
2718 {
2719 uint32_t buf[16];
2720 uint32_t *p = buf;
2721
2722 p += emit_mov_addr (p, ip0, fn);
2723 p += emit_blr (p, ip0);
2724
2725 emit_ops_insns (buf, p - buf);
2726 }
2727
2728 /* Implementation of emit_ops method "emit_reg". */
2729
2730 static void
2731 aarch64_emit_reg (int reg)
2732 {
2733 uint32_t buf[16];
2734 uint32_t *p = buf;
2735
2736 /* Set x0 to unsigned char *regs. */
2737 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2738 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2739 p += emit_mov (p, x1, immediate_operand (reg));
2740
2741 emit_ops_insns (buf, p - buf);
2742
2743 aarch64_emit_call (get_raw_reg_func_addr ());
2744 }
2745
2746 /* Implementation of emit_ops method "emit_pop". */
2747
2748 static void
2749 aarch64_emit_pop (void)
2750 {
2751 uint32_t buf[16];
2752 uint32_t *p = buf;
2753
2754 p += emit_pop (p, x0);
2755
2756 emit_ops_insns (buf, p - buf);
2757 }
2758
2759 /* Implementation of emit_ops method "emit_stack_flush". */
2760
2761 static void
2762 aarch64_emit_stack_flush (void)
2763 {
2764 uint32_t buf[16];
2765 uint32_t *p = buf;
2766
2767 p += emit_push (p, x0);
2768
2769 emit_ops_insns (buf, p - buf);
2770 }
2771
2772 /* Implementation of emit_ops method "emit_zero_ext". */
2773
2774 static void
2775 aarch64_emit_zero_ext (int arg)
2776 {
2777 uint32_t buf[16];
2778 uint32_t *p = buf;
2779
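  /* Keep the low ARG bits of x0 and clear the rest.  */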
2780 p += emit_ubfx (p, x0, x0, 0, arg);
2781
2782 emit_ops_insns (buf, p - buf);
2783 }
2784
2785 /* Implementation of emit_ops method "emit_swap". */
2786
2787 static void
2788 aarch64_emit_swap (void)
2789 {
2790 uint32_t buf[16];
2791 uint32_t *p = buf;
2792
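  /* Exchange x0 (the cached top of stack) with the value spilled at
     [sp].  */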
2793 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2794 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2795 p += emit_mov (p, x0, register_operand (x1));
2796
2797 emit_ops_insns (buf, p - buf);
2798 }
2799
2800 /* Implementation of emit_ops method "emit_stack_adjust". */
2801
2802 static void
2803 aarch64_emit_stack_adjust (int n)
2804 {
2805 /* This is not needed with our design. */
2806 uint32_t buf[16];
2807 uint32_t *p = buf;
2808
2809 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2810
2811 emit_ops_insns (buf, p - buf);
2812 }
2813
2814 /* Implementation of emit_ops method "emit_int_call_1". */
2815
2816 static void
2817 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2818 {
2819 uint32_t buf[16];
2820 uint32_t *p = buf;
2821
2822 p += emit_mov (p, x0, immediate_operand (arg1));
2823
2824 emit_ops_insns (buf, p - buf);
2825
2826 aarch64_emit_call (fn);
2827 }
2828
2829 /* Implementation of emit_ops method "emit_void_call_2". */
2830
2831 static void
2832 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2833 {
2834 uint32_t buf[16];
2835 uint32_t *p = buf;
2836
2837 /* Push x0 on the stack. */
2838 aarch64_emit_stack_flush ();
2839
2840 /* Set up the arguments for the function call:
2841
2842 x0: arg1
2843 x1: top of the stack
2844
2845 MOV x1, x0
2846 MOV x0, #arg1 */
2847
2848 p += emit_mov (p, x1, register_operand (x0));
2849 p += emit_mov (p, x0, immediate_operand (arg1));
2850
2851 emit_ops_insns (buf, p - buf);
2852
2853 aarch64_emit_call (fn);
2854
2855 /* Restore x0. */
2856 aarch64_emit_pop ();
2857 }
2858
2859 /* Implementation of emit_ops method "emit_eq_goto". */
2860
2861 static void
2862 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2863 {
2864 uint32_t buf[16];
2865 uint32_t *p = buf;
2866
2867 p += emit_pop (p, x1);
2868 p += emit_cmp (p, x1, register_operand (x0));
2869 /* Branch over the next instruction if x0 != x1. */
2870 p += emit_bcond (p, NE, 8);
2871 /* The NOP instruction will be patched with an unconditional branch. */
2872 if (offset_p)
2873 *offset_p = (p - buf) * 4;
2874 if (size_p)
2875 *size_p = 4;
2876 p += emit_nop (p);
2877
2878 emit_ops_insns (buf, p - buf);
2879 }
2880
2881 /* Implementation of emit_ops method "emit_ne_goto". */
2882
2883 static void
2884 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2885 {
2886 uint32_t buf[16];
2887 uint32_t *p = buf;
2888
2889 p += emit_pop (p, x1);
2890 p += emit_cmp (p, x1, register_operand (x0));
2891 /* Branch over the next instruction if x0 == x1. */
2892 p += emit_bcond (p, EQ, 8);
2893 /* The NOP instruction will be patched with an unconditional branch. */
2894 if (offset_p)
2895 *offset_p = (p - buf) * 4;
2896 if (size_p)
2897 *size_p = 4;
2898 p += emit_nop (p);
2899
2900 emit_ops_insns (buf, p - buf);
2901 }
2902
2903 /* Implementation of emit_ops method "emit_lt_goto". */
2904
2905 static void
2906 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2907 {
2908 uint32_t buf[16];
2909 uint32_t *p = buf;
2910
2911 p += emit_pop (p, x1);
2912 p += emit_cmp (p, x1, register_operand (x0));
2913 /* Branch over the next instruction if x1 >= x0. */
2914 p += emit_bcond (p, GE, 8);
2915 /* The NOP instruction will be patched with an unconditional branch. */
2916 if (offset_p)
2917 *offset_p = (p - buf) * 4;
2918 if (size_p)
2919 *size_p = 4;
2920 p += emit_nop (p);
2921
2922 emit_ops_insns (buf, p - buf);
2923 }
2924
2925 /* Implementation of emit_ops method "emit_le_goto". */
2926
2927 static void
2928 aarch64_emit_le_goto (int *offset_p, int *size_p)
2929 {
2930 uint32_t buf[16];
2931 uint32_t *p = buf;
2932
2933 p += emit_pop (p, x1);
2934 p += emit_cmp (p, x1, register_operand (x0));
2935 /* Branch over the next instruction if x1 > x0. */
2936 p += emit_bcond (p, GT, 8);
2937 /* The NOP instruction will be patched with an unconditional branch. */
2938 if (offset_p)
2939 *offset_p = (p - buf) * 4;
2940 if (size_p)
2941 *size_p = 4;
2942 p += emit_nop (p);
2943
2944 emit_ops_insns (buf, p - buf);
2945 }
2946
2947 /* Implementation of emit_ops method "emit_gt_goto". */
2948
2949 static void
2950 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2951 {
2952 uint32_t buf[16];
2953 uint32_t *p = buf;
2954
2955 p += emit_pop (p, x1);
2956 p += emit_cmp (p, x1, register_operand (x0));
2957 /* Branch over the next instruction if x1 <= x0. */
2958 p += emit_bcond (p, LE, 8);
2959 /* The NOP instruction will be patched with an unconditional branch. */
2960 if (offset_p)
2961 *offset_p = (p - buf) * 4;
2962 if (size_p)
2963 *size_p = 4;
2964 p += emit_nop (p);
2965
2966 emit_ops_insns (buf, p - buf);
2967 }
2968
2969 /* Implementation of emit_ops method "emit_ge_goto". */
2970
2971 static void
2972 aarch64_emit_ge_got (int *offset_p, int *size_p)
2973 {
2974 uint32_t buf[16];
2975 uint32_t *p = buf;
2976
2977 p += emit_pop (p, x1);
2978 p += emit_cmp (p, x1, register_operand (x0));
2979 /* Branch over the next instruction if x1 < x0. */
2980 p += emit_bcond (p, LT, 8);
2981 /* The NOP instruction will be patched with an unconditional branch. */
2982 if (offset_p)
2983 *offset_p = (p - buf) * 4;
2984 if (size_p)
2985 *size_p = 4;
2986 p += emit_nop (p);
2987
2988 emit_ops_insns (buf, p - buf);
2989 }
2990
2991 static struct emit_ops aarch64_emit_ops_impl =
2992 {
2993 aarch64_emit_prologue,
2994 aarch64_emit_epilogue,
2995 aarch64_emit_add,
2996 aarch64_emit_sub,
2997 aarch64_emit_mul,
2998 aarch64_emit_lsh,
2999 aarch64_emit_rsh_signed,
3000 aarch64_emit_rsh_unsigned,
3001 aarch64_emit_ext,
3002 aarch64_emit_log_not,
3003 aarch64_emit_bit_and,
3004 aarch64_emit_bit_or,
3005 aarch64_emit_bit_xor,
3006 aarch64_emit_bit_not,
3007 aarch64_emit_equal,
3008 aarch64_emit_less_signed,
3009 aarch64_emit_less_unsigned,
3010 aarch64_emit_ref,
3011 aarch64_emit_if_goto,
3012 aarch64_emit_goto,
3013 aarch64_write_goto_address,
3014 aarch64_emit_const,
3015 aarch64_emit_call,
3016 aarch64_emit_reg,
3017 aarch64_emit_pop,
3018 aarch64_emit_stack_flush,
3019 aarch64_emit_zero_ext,
3020 aarch64_emit_swap,
3021 aarch64_emit_stack_adjust,
3022 aarch64_emit_int_call_1,
3023 aarch64_emit_void_call_2,
3024 aarch64_emit_eq_goto,
3025 aarch64_emit_ne_goto,
3026 aarch64_emit_lt_goto,
3027 aarch64_emit_le_goto,
3028 aarch64_emit_gt_goto,
3029 aarch64_emit_ge_got,
3030 };
3031
3032 /* Implementation of linux_target_ops method "emit_ops". */
3033
3034 static struct emit_ops *
3035 aarch64_emit_ops (void)
3036 {
3037 return &aarch64_emit_ops_impl;
3038 }
3039
3040 /* Implementation of linux_target_ops method
3041 "get_min_fast_tracepoint_insn_len". */
3042
3043 static int
3044 aarch64_get_min_fast_tracepoint_insn_len (void)
3045 {
3046 return 4;
3047 }
3048
3049 /* Implementation of linux_target_ops method "supports_range_stepping". */
3050
3051 static int
3052 aarch64_supports_range_stepping (void)
3053 {
3054 return 1;
3055 }
3056
3057 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3058
3059 static const gdb_byte *
3060 aarch64_sw_breakpoint_from_kind (int kind, int *size)
3061 {
3062 if (is_64bit_tdesc ())
3063 {
3064 *size = aarch64_breakpoint_len;
3065 return aarch64_breakpoint;
3066 }
3067 else
3068 return arm_sw_breakpoint_from_kind (kind, size);
3069 }
3070
3071 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3072
3073 int
3074 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3075 {
3076 if (is_64bit_tdesc ())
3077 return aarch64_breakpoint_len;
3078 else
3079 return arm_breakpoint_kind_from_pc (pcptr);
3080 }
3081
3082 /* Implementation of the target ops method
3083 "breakpoint_kind_from_current_state". */
3084
3085 int
3086 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3087 {
3088 if (is_64bit_tdesc ())
3089 return aarch64_breakpoint_len;
3090 else
3091 return arm_breakpoint_kind_from_current_state (pcptr);
3092 }
3093
3094 /* Support for hardware single step. */
3095
3096 static int
3097 aarch64_supports_hardware_single_step (void)
3098 {
3099 return 1;
3100 }
3101
3102 struct linux_target_ops the_low_target =
3103 {
3104 aarch64_sw_breakpoint_from_kind,
3105 NULL, /* get_next_pcs */
3106 0, /* decr_pc_after_break */
3107 aarch64_breakpoint_at,
3108 aarch64_supports_z_point_type,
3109 aarch64_insert_point,
3110 aarch64_remove_point,
3111 aarch64_stopped_by_watchpoint,
3112 aarch64_stopped_data_address,
3113 NULL, /* collect_ptrace_register */
3114 NULL, /* supply_ptrace_register */
3115 aarch64_linux_siginfo_fixup,
3116 aarch64_linux_new_process,
3117 aarch64_linux_delete_process,
3118 aarch64_linux_new_thread,
3119 aarch64_linux_delete_thread,
3120 aarch64_linux_new_fork,
3121 aarch64_linux_prepare_to_resume,
3122 NULL, /* process_qsupported */
3123 aarch64_supports_tracepoints,
3124 aarch64_get_thread_area,
3125 aarch64_install_fast_tracepoint_jump_pad,
3126 aarch64_emit_ops,
3127 aarch64_get_min_fast_tracepoint_insn_len,
3128 aarch64_supports_range_stepping,
3129 aarch64_supports_hardware_single_step,
3130 aarch64_get_syscall_trapinfo,
3131 };
3132
3133 /* The linux target ops object. */
3134
3135 linux_process_target *the_linux_target = &the_aarch64_target;
3136
3137 void
3138 initialize_low_arch (void)
3139 {
3140 initialize_low_arch_aarch32 ();
3141
3142 initialize_regsets_info (&aarch64_regsets_info);
3143 initialize_regsets_info (&aarch64_sve_regsets_info);
3144 }