/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,			/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
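
/* Illustration (added note, not part of the original source): each
   regmap entry is the byte offset of that GDB register inside the
   raw `struct user_regs_struct' transfer buffer, so GDB regnum 0
   (%rax) lives at offset RAX * 8.  A hypothetical consumer sketch:

     char *buf = ...;   // raw ptrace general-register block
     // copy GDB register REGNO out of the ptrace layout
     collect_register (regcache, regno, buf + i386_regmap[regno]);

   Entries of -1 mark registers (x87/SSE/AVX512/...) that are not in
   the general-purpose block and travel via other regsets instead.  */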
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
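
/* Illustrative note (added; a sketch, not original code): the bitness
   test keys off the size of register 0 in the current target
   description -- 8 bytes for %rax, 4 for %eax -- and gates the two
   ptrace flavors used throughout this file, e.g.:

     if (is_64bit_tdesc ())
       ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);   // 64-bit path
     else
       ptrace (PTRACE_GET_THREAD_AREA, lwpid, ...);             // 32-bit path
*/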
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
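
/* Worked example (added for clarity, not in the original): an x86
   segment selector keeps the descriptor-table index in its high 13
   bits, with the low 3 bits holding TI/RPL.  So with a typical
   32-bit Linux TLS selector %gs = 0x33, idx = 0x33 >> 3 = 6, and
   PTRACE_GET_THREAD_AREA returns GDT entry 6, whose second word
   (desc[1]) is the segment base, i.e. the thread area address.  */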
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
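
/* Numeric example of the sign extension above (added for clarity):
   a 32-bit inferior stopped inside a syscall may have %eax =
   0xfffffe00 (-512, -ERESTARTSYS).  Stored into the 64-bit slot it
   must become 0xfffffffffffffe00; writing back the zero-extended
   0x00000000fffffe00 instead would defeat the kernel's syscall
   restart logic.  */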
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private
				       ->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
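
/* A minimal sketch of how this offset is used (added illustration;
   the real code is in x86_linux_read_description below):

     uint64_t xstateregs[X86_XSTATE_SSE_SIZE / sizeof (uint64_t)];
     ... PTRACE_GETREGSET with NT_X86_XSTATE fills xstateregs ...
     uint64_t xcr0
       = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   Bit 1 (SSE) and bit 2 (AVX) of that mask, for example, tell us
   whether the OS saves %xmm and %ymm state in the XSAVE area.  */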
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* TRUE if GDB told us (via qSupported "xmlRegisters=i386") that it
   supports XML target descriptions.  */
static int use_xml;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}
1041 static struct regsets_info x86_regsets_info
=
1043 x86_regsets
, /* regsets */
1044 0, /* num_regsets */
1045 NULL
, /* disabled_regsets */
1049 static struct regs_info amd64_linux_regs_info
=
1051 NULL
, /* regset_bitmap */
1052 NULL
, /* usrregs_info */
1056 static struct usrregs_info i386_linux_usrregs_info
=
1062 static struct regs_info i386_linux_regs_info
=
1064 NULL
, /* regset_bitmap */
1065 &i386_linux_usrregs_info
,
1070 x86_target::get_regs_info ()
1073 if (is_64bit_tdesc ())
1074 return &amd64_linux_regs_info
;
1077 return &i386_linux_regs_info
;
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
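
/* Usage illustration (added; not in the original source): push_opcode
   parses a string of hex byte values, appends them to BUF and
   returns the number of bytes emitted, e.g.:

     unsigned char insn[16];
     int n = push_opcode (insn, "48 89 e6");  // emits 0x48 0x89 0xe6
     // n == 3, the encoding of `mov %rsp,%rsi'
*/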
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
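
/* Worked example for the jump wiring above (added illustration, with
   made-up addresses): if the tracepoint instruction lives at tpaddr
   = 0x400500 and the pad was built at *jump_entry = 0x7f0000001000,
   the displacement 0x7f0000001000 - (0x400500 + 5) does not fit in a
   signed 32 bits, so installation fails with the "too far" error;
   IPA jump pads must therefore be mapped within +/-2GB of the
   tracepoint address.  */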
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
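
/* Size arithmetic behind the 4/5-byte answers above (added note):
   `jmp rel32' is 0xe9 plus a 4-byte displacement = 5 bytes, while
   the 16-bit form 0x66 0xe9 plus a 2-byte displacement = 4 bytes,
   matching jump_insn[] and small_jump_insn[] defined earlier.  */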
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
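
/* Expansion sketch (added illustration): EMIT_ASM (foo, "pop %rax")
   roughly produces

     extern unsigned char start_foo, end_foo;
     add_insns (&start_foo, &end_foo - &start_foo);
     __asm__ ("jmp end_foo\n\t" "start_foo:\t" "pop %rax\n\t" "end_foo:");

   i.e. the bytes the compiler assembled between the two labels are
   copied into the inferior at current_insn_ptr, while the jmp keeps
   the host from ever executing them.  */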
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
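
/* Design note with a small example (added): a direct `call rel32'
   (0xe8) reaches only +/-2GB, so for a collector placed, say, at
   0x7ffff7a00000 while emitting at 0x601000, offset64 overflows
   INT_MAX and the mov-$fn,%r10 / callq *%r10 form is used instead;
   %r10 is chosen because it is call-clobbered and needs no
   save/restore.  */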
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	    "push %ebp\n\t"
	    "mov %esp,%ebp\n\t"
	    "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	    "mov 12(%ebp),%ecx\n\t"
	    "mov %eax,(%ecx)\n\t"
	    "mov %ebx,0x4(%ecx)\n\t"
	    "xor %eax,%eax\n\t"
	    "pop %ebx\n\t"
	    "pop %ebp\n\t"
	    "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	    "add (%esp),%eax\n\t"
	    "adc 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	    "subl %eax,(%esp)\n\t"
	    "sbbl %ebx,4(%esp)\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	    "or %ebx,%eax\n\t"
	    "test %eax,%eax\n\t"
	    "sete %cl\n\t"
	    "xor %ebx,%ebx\n\t"
	    "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	    "and (%esp),%eax\n\t"
	    "and 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	    "or (%esp),%eax\n\t"
	    "or 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	    "xor (%esp),%eax\n\t"
	    "xor 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	    "xor $0xffffffff,%eax\n\t"
	    "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jne .Li386_equal_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "je .Li386_equal_true\n\t"
	    ".Li386_equal_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_equal_end\n\t"
	    ".Li386_equal_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_equal_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    "jne .Li386_less_signed_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    ".Li386_less_signed_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_signed_end\n\t"
	    ".Li386_less_signed_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_signed_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    "jne .Li386_less_unsigned_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    ".Li386_less_unsigned_false:\n\t"
	    "xor %eax,%eax\n\t"
	    "jmp .Li386_less_unsigned_end\n\t"
	    ".Li386_less_unsigned_true:\n\t"
	    "mov $1,%eax\n\t"
	    ".Li386_less_unsigned_end:\n\t"
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		"movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		"movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		"movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		"movl 4(%eax),%ebx\n\t"
		"movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	    "mov %eax,%ecx\n\t"
	    "or %ebx,%ecx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "cmpl $0,%ecx\n\t"
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	    "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	    "mov %eax,4(%esp)\n\t"
	    "mov 8(%ebp),%eax\n\t"
	    "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	    "xor %ebx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	    "pop %eax\n\t"
	    "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	    "push %ebx\n\t"
	    "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		"and $0xff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		"and $0xffff,%eax\n\t"
		"xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		"xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	    "mov %eax,%ecx\n\t"
	    "mov %ebx,%edx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "push %edx\n\t"
	    "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	    /* Reserve a bit of stack space.  */
	    "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	    "mov %edx,%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	    /* Preserve %eax only; we don't have to worry about %ebx.  */
	    "push %eax\n\t"
	    /* Reserve a bit of stack space for arguments.  */
	    "sub $0x10,%esp\n\t"
	    /* Copy "top" to the second argument position.  (Note that
	       we can't assume function won't scribble on its
	       arguments, so don't try to restore from this.)  */
	    "mov %eax,4(%esp)\n\t"
	    "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	    "lea 0x10(%esp),%esp\n\t"
	    /* Restore original stack top.  */
	    "pop %eax");
}


static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
 */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}