/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target descriptions of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
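
/* Illustrative sketch, not part of the original source: the regmap
   arrays above turn a GDB register number into a byte offset into the
   `struct user' register block, so register transfer is plain pointer
   arithmetic.  The helper below is hypothetical.  */
#if 0
static void
example_collect_one (struct regcache *regcache, char *buf, int gdb_regno)
{
  /* E.g. GDB regno 0 (eax) lives at byte offset RAX * 8 in the
     64-bit transfer layout.  */
  collect_register (regcache, gdb_regno, buf + i386_regmap[gdb_regno]);
}
#endif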
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,			/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        collect_register_by_name (regcache, "fs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

        collect_register_by_name (regcache, "gs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
                   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
          supply_register_by_name (regcache, "fs_base", &base);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
          supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                              int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                              int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}
/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471 of the XSAVE
  block) are the OS enabled extended state mask, which is the same as
  the extended control register 0 (the XFEATURE_ENABLED_MASK register),
  XCR0.  We can use this mask together with the mask saved in the
  xstate_hdr_bytes to determine what states the processor/OS supports
  and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
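
/* Illustrative sketch, not part of the original source: given a raw
   XSAVE buffer laid out as above, XCR0 is simply the 64-bit word at
   the offset just defined.  The helper below is hypothetical.  */
#if 0
static uint64_t
example_xcr0_from_xsave (const unsigned char *xsave_buf)
{
  uint64_t xcr0;

  memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);
  return xcr0;
}
#endif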
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return i386_linux_read_description (X86_XSTATE_X87);
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
                                              !is_elf64);

      if (tdesc == NULL)
        tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
        tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target descriptions of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);

          char *saveptr;
          for (char *p = strtok_r (copy, ",", &saveptr);
               p != NULL;
               p = strtok_r (NULL, ",", &saveptr))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  the_x86_target.update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
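
/* Illustrative usage, not part of the original source: push_opcode
   parses a string of hex byte values, appends the bytes to BUF, and
   returns how many bytes it emitted.  */
#if 0
  unsigned char buf[16];
  int i = 0;

  /* Emits the three bytes 0x48 0x89 0xe6, i.e. "mov %rsp,%rsi".  */
  i += push_opcode (&buf[i], "48 89 e6");
#endif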
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i;
  int64_t loffset;
  int offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
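
/* Illustrative sketch, not part of the original source: the jump pad
   builders above and below compute rel32 displacements the same way.
   A 5-byte "jmp rel32" (opcode 0xe9) jumps relative to the end of the
   jump instruction itself, hence the "+ sizeof (jump_insn)" terms.
   The values here are hypothetical.  */
#if 0
  CORE_ADDR insn_addr = 0x400000;       /* where the jmp is written */
  CORE_ADDR target = 0x400f00;          /* where it should land */
  int32_t rel32 = target - (insn_addr + 5 /* sizeof (jump_insn) */);
#endif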
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esi,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
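
/* Illustrative usage, not part of the original source: EMIT_ASM
   assembles INSNS into this function's own text between two local
   labels, then add_insns copies those bytes to the compiled-bytecode
   buffer at current_insn_ptr.  A call such as...  */
#if 0
  EMIT_ASM (example_nop_pair,
	    "nop\n\t"
	    "nop");
  /* ...copies the two encoded NOP bytes (0x90 0x90) into the buffer,
     without the source ever spelling out raw opcode bytes.  */
#endif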
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  x86_siginfo_fixup, /* need to fix up i386 siginfo if host is amd64 */
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}