/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

140
141static void
142aarch64_store_fpregset (struct regcache *regcache, const void *buf)
143{
9caa3311
YQ
144 const struct user_fpsimd_state *regset
145 = (const struct user_fpsimd_state *) buf;
176eb98c
MS
146 int i;
147
148 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
cc628f3d
AH
149 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
150 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
151 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
176eb98c
MS
152}
153
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
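/* For reference: these bytes are the little-endian encoding of
   0xd4200000, i.e. BRK #0.  AArch64 instructions are always stored
   little-endian, regardless of the data endianness.  */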

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      current_process ()->tdesc = aarch64_linux_read_description (vq);
    }
  else
    current_process ()->tdesc = tdesc_arm_with_neon;

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
  {
    uint32_t imm;
    struct aarch64_register reg;
  };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

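/* As an illustration, the stack push STP x29, x30, [sp, #-16]! corresponds
   to emit_stp (p, fp, lr, sp, preindex_memory_operand (-16)) below; the
   byte offset -16 is scaled down to the imm7 field value -2 by the
   "operand.index >> 3" above.  */
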
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

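/* As an illustration, for ADDR 0x0000007fb7d54000 the function above
   emits only three instructions:

     MOV  xd, #0x4000
     MOVK xd, #0xb7d5, lsl #16
     MOVK xd, #0x7f, lsl #32

   since the top 16 bits of the address are zero, the final MOVK is
   skipped.  */
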
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

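/* For example, emit_ubfx (p, w0, w2, 0, 8) extracts the low byte of w2
   into w0; by the alias above it is encoded as UBFM w0, w2, #0, #7
   (lsb + width - 1 = 7).  */
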
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

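/* For example, emit_cset (p, x0, EQ) sets x0 to 1 if the Z flag is set
   and to 0 otherwise; it is encoded as CSINC x0, xzr, xzr, NE, since
   EQ (0x0) ^ 0x1 yields NE (0x1).  */
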
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

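/* A typical use is to build up a scratch buffer of instructions with the
   emit_* helpers above and then flush it to the inferior in one go, e.g.:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);
     p += emit_blr (p, x0);
     append_insns (&buildaddr, p - buf, buf);

   This is the pattern used by the jump pad code below.  */
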
/* Sub-class of struct aarch64_insn_data; stores information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

0badd99f 1791/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1792
0badd99f
YQ
1793static void
1794aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1795 const unsigned rt, const int is64,
1796 struct aarch64_insn_data *data)
1797{
1798 struct aarch64_insn_relocation_data *insn_reloc
1799 = (struct aarch64_insn_relocation_data *) data;
1800 CORE_ADDR address = data->insn_addr + offset;
1801
1802 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1803 aarch64_register (rt, 1), address);
1804
1805 /* We know exactly what address to load from, and what register we
1806 can use:
1807
1808 MOV xd, #(oldloc + offset)
1809 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1810 ...
1811
1812 LDR xd, [xd] ; or LDRSW xd, [xd]
1813
1814 */
1815
1816 if (is_sw)
1817 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1818 aarch64_register (rt, 1),
1819 aarch64_register (rt, 1),
1820 offset_memory_operand (0));
bb903df0 1821 else
0badd99f
YQ
1822 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1823 aarch64_register (rt, is64),
1824 aarch64_register (rt, 1),
1825 offset_memory_operand (0));
1826}
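
/* The "MOV xd, #addr" pseudo-sequence referenced above materializes a
   64-bit constant 16 bits at a time.  For a hypothetical address
   0x0000123456789abc it could look like this (illustrative;
   emit_mov_addr, defined earlier in this file, chooses the exact
   sequence):

     MOVZ xd, #0x9abc
     MOVK xd, #0x5678, lsl #16
     MOVK xd, #0x1234, lsl #32  */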

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
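
/* aarch64_relocate_instruction decodes a single instruction and calls
   back into exactly one of the methods above; see its use in
   aarch64_install_fast_tracepoint_jump_pad below.  */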

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

   */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

   */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

   */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

   */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

   */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

   */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

   */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, x2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, w2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

   */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are
     ; done before this instruction is executed.  Furthermore, this
     ; instruction will trigger an event, letting other threads know
     ; they can grab the lock.
     STLR xzr, [x0]

   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

   */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

   */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

   */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

   */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
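
/* In C terms, the LDAXR/STXR spin-lock emitted above behaves roughly
   like the following compare-and-swap loop.  This is an illustrative
   analogy using the GCC __atomic builtins, not code the jump pad could
   actually use (the pad must not call into the runtime, which is why
   the lock is emitted as raw instructions):  */
#if 0
static void
example_lock (uintptr_t *lock, uintptr_t owner)
{
  uintptr_t expected = 0;

  /* Acquire semantics mirror LDAXR; retrying on failure mirrors the
     CBNZ loops around the LDAXR/STXR pair.  */
  while (!__atomic_compare_exchange_n (lock, &expected, owner,
                                       0 /* strong */,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    expected = 0;
}

static void
example_unlock (uintptr_t *lock)
{
  /* Release semantics mirror the STLR used above.  */
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
}
#endif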

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
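
/* Together, emit_pop and emit_push implement the 16-byte-per-cell stack
   discipline of the compiled expression evaluator: x0 caches the top of
   the stack, so a typical binary opcode compiles to "pop x1; OP x0, x1,
   x0", as in aarch64_emit_add below.  */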

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
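
/* Taken together, the prologue and epilogue make the compiled expression
   behave like this C function (a sketch matching the prototype described
   in the prologue comment above; top_of_stack stands for whatever is in
   x0 when the epilogue runs):

     enum eval_result_type
     compiled_expression (unsigned char *regs, ULONGEST *value)
     {
       ... emitted opcode bodies run here ...
       *value = top_of_stack;
       return expr_eval_no_error;
     }
*/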

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
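
/* Sketch of the patching protocol (hypothetical caller): the bytecode
   compiler records the offset/size reported by aarch64_emit_goto or
   aarch64_emit_if_goto, and once the destination is known overwrites
   the placeholder NOP:

     aarch64_write_goto_address (goto_insn_addr, destination, 4);

   which replaces the NOP with an unconditional "B (to - from)".  */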

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
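
/* aarch64_emit_ne_goto and the comparison gotos below all follow the
   same shape as aarch64_emit_eq_goto: pop the second operand into x1,
   compare it against x0, branch over a placeholder NOP on the inverted
   condition, and report the NOP's offset so it can later be patched by
   aarch64_write_goto_address.  */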

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
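
/* Every AArch64 instruction is 4 bytes, and the branch into the jump
   pad emitted by aarch64_install_fast_tracepoint_jump_pad is a single
   B instruction, so any instruction is long enough to be replaced by
   the jump.  */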

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  aarch64_cannot_fetch_register,
  aarch64_cannot_store_register,
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);

#if GDB_SELF_TEST
  initialize_low_tdesc ();
#endif
}