/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,			/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
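/* Illustrative note (not in the original file): DESC above receives a
   struct user_desc from the kernel, whose layout begins
   (entry_number, base_addr, limit, flags_word); desc[1] is therefore
   the segment's base_addr field, i.e. the per-thread base address that
   libthread_db is asking for.  */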
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
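/* Worked example for the selector arithmetic above: a typical 32-bit
   Linux TLS %gs selector of 0x33 is binary 0110011, i.e. GDT index 6,
   TI = 0, RPL = 3.  Shifting right by reg_thread_area (3) strips the
   TI/RPL bits, leaving the GDT slot (6) whose base we then fetch with
   PTRACE_GET_THREAD_AREA.  */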
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
#ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
#endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
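/* Note on the regset table above: the NT_X86_XSTATE entry is
   registered with size 0 because the XSAVE area's size is only known
   at runtime; x86_linux_read_description () below patches that
   regset's size to X86_XSTATE_SIZE (xcr0) once PTRACE_GETREGSET has
   been probed successfully.  */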
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
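/* A note on the definitions above: 0xCC is the one-byte int3
   instruction.  The CPU reports the trap with the PC pointing just
   past the 0xCC, which is why low_decr_pc_after_break () returns 1:
   the server rewinds the PC by x86_breakpoint_len to land back on the
   breakpoint address itself.  */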
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
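#if 0
/* Minimal sketch (not compiled in) of how the XCR0 mask is pulled out
   of the XSAVE block at the offset defined above.  The real logic,
   including the PTRACE_GETREGSET availability probe and the regset
   size fixup, lives in x86_linux_read_description () below.  TID is
   assumed to be the lwpid of a stopped thread.  */
static uint64_t
read_xcr0_sketch (int tid)
{
  uint64_t xstateregs[X86_XSTATE_SSE_SIZE / sizeof (uint64_t)];
  struct iovec iov = { xstateregs, sizeof (xstateregs) };

  if (ptrace (PTRACE_GETREGSET, tid,
	      (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
    return 0;  /* Kernel exposes no XSAVE state.  */

  return xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
}
#endif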
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
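/* Note on the register choice above: the kernel overwrites %rax/%eax
   with the syscall's return value, so the syscall number itself is
   only preserved in the orig_rax/orig_eax slot of the register set;
   that is why the collection reads the "orig_*" pseudo registers.  */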
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
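/* Example of the helper above: push_opcode decodes a string of hex
   byte pairs into BUF and returns the byte count, so

     i += push_opcode (&buf[i], "48 89 e6");   /+ mov %rsp,%rsi +/

   appends the 3-byte instruction encoding and advances I by 3.  This
   keeps the jump-pad builders below readable next to their
   disassembly comments.  */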
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  return 5;
#else
  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
#endif
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
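/* Illustration of the macros above (not part of the original file): a
   use such as

     EMIT_ASM (amd64_add,
	       "add (%rsp),%rax\n\t"
	       "lea 0x8(%rsp),%rsp");

   assembles the instructions in-line between the start_amd64_add and
   end_amd64_add labels, jumps over them at runtime so they never
   execute in GDBserver itself, and copies the bytes between the two
   labels into the compiled-bytecode buffer via add_insns ().  */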
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
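/* The encoding produced above is 48 8d 64 24 <disp8>, i.e.
   "lea disp8(%rsp),%rsp" with a sign-extended 8-bit displacement of
   n * 8 bytes, which is why only small stack adjustments fit this
   form.  */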
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}