/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
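/* Note: 0xe9 is the 32-bit relative "jmp rel32" opcode and 0x66 0xe9
   its 16-bit operand-size variant; the zeroed displacement bytes in
   these templates are filled in with memcpy by the jump pad builders
   below before the instruction is written to the inferior.  */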
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
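/* These ARCH_* codes are the arch_prctl sub-commands; they are passed
   as the fourth ptrace argument together with PTRACE_ARCH_PRCTL, e.g.
   ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) reads the FS
   segment base of thread LWPID into BASE (see the uses below).  */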
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
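/* A -1 entry above means the register has no fixed offset in `struct
   user' and is not transferred via the general-purpose regset;
   x86_fill_gregset/x86_store_gregset below skip those slots, leaving
   them to the fxsave/xsave regsets instead.  */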
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
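/* 0xCC is the one-byte INT3 software breakpoint instruction;
   x86_breakpoint_at below identifies a planted breakpoint by reading
   that byte back from the inferior's memory.  */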
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors are zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}

static int use_xml;
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] hold the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
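/* For example, when the XSAVE area is viewed as an array of uint64_t
   (as x86_linux_read_description does below), XCR0 sits at index
   I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t) == 58.  */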
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX_MPX_AVX512_PKU_MASK:
		  return tdesc_amd64_avx_mpx_avx512_pku_linux;

		case X86_XSTATE_AVX_AVX512_MASK:
		  return tdesc_amd64_avx_avx512_linux;

		case X86_XSTATE_AVX_MPX_MASK:
		  return tdesc_amd64_avx_mpx_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX_MPX_AVX512_PKU_MASK:
		  /* No x32 MPX and PKU, fall back to avx_avx512.  */
		  return tdesc_x32_avx_avx512_linux;

		case X86_XSTATE_AVX_AVX512_MASK:
		  return tdesc_x32_avx_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}
/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
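/* For example, i += push_opcode (&buf[i], "48 89 e6") appends the
   bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) to BUF and advances I by the
   three bytes written, which is how the jump pad builders below
   assemble their instruction sequences.  */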
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += sizeof (tpaddr);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
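/* To summarize, the pad built above is: save GPRs/eflags/pc, build
   the collecting_t object on the stack, spin on the lock, call the
   collector, release the lock, pop the saved state, execute the
   relocated original instruction, then jump back to the instruction
   following the tracepoint.  */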
1262 #endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  return 5;
#else
  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
#endif
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
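/* E.g. EMIT_ASM (amd64_add, "add (%rsp),%rax\n\t" "lea 0x8(%rsp),%rsp")
   assembles those instructions once into this very function between
   the start_amd64_add/end_amd64_add labels, and at runtime add_insns
   copies the bytes between the labels into the inferior.  */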
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi\n\t"
	      "push %edi");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %edi\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
 */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2880 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2881 const struct target_desc
*tdesc
= regcache
->tdesc
;
2884 if (tdesc
== tdesc_amd64_linux
|| tdesc
== tdesc_amd64_linux_no_xml
2885 || tdesc
== tdesc_x32_linux
)
2886 return X86_TDESC_SSE
;
2887 if (tdesc
== tdesc_amd64_avx_linux
|| tdesc
== tdesc_x32_avx_linux
)
2888 return X86_TDESC_AVX
;
2889 if (tdesc
== tdesc_amd64_mpx_linux
)
2890 return X86_TDESC_MPX
;
2891 if (tdesc
== tdesc_amd64_avx_mpx_linux
)
2892 return X86_TDESC_AVX_MPX
;
2893 if (tdesc
== tdesc_amd64_avx_mpx_avx512_pku_linux
|| tdesc
== tdesc_x32_avx_avx512_linux
)
2894 return X86_TDESC_AVX_MPX_AVX512_PKU
;
2895 if (tdesc
== tdesc_amd64_avx_avx512_linux
)
2896 return X86_TDESC_AVX_AVX512
;
2899 if (tdesc
== tdesc_i386_linux_no_xml
)
2900 return X86_TDESC_SSE
;
2902 return i386_get_ipa_tdesc_idx (tdesc
);
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_mpx_linux ();
  init_registers_amd64_avx_mpx_linux ();
  init_registers_amd64_avx_avx512_linux ();
  init_registers_amd64_avx_mpx_avx512_pku_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  initialize_low_tdesc ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}