/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"

#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"
#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target descriptions of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */
bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
/* Does GDB support XML target descriptions?  Set from the qSupported
   "xmlRegisters=" feature below.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  extended state regions (AVX, etc.)
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes, i.e. bytes [464..471], hold the
  OS enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can
  use this mask together with the mask saved in the xstate_hdr_bytes to
  determine what states the processor/OS supports and what state, used
  or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
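/* Illustrative sketch, not part of the original code: given a raw
   XSAVE buffer, the XCR0 image is read as a 64-bit value at byte
   offset 464 (I386_LINUX_XSAVE_XCR0_OFFSET), i.e. the first 8 bytes
   of the sw_usable_bytes area described above.  A sketch under that
   assumption:  */
#if 0
#include <stdint.h>
#include <string.h>

static uint64_t
xsave_read_xcr0 (const unsigned char *xsave_buf)
{
  uint64_t xcr0;

  /* Copy out the OS-enabled extended state mask.  */
  memcpy (&xcr0, xsave_buf + 464, sizeof (xcr0));
  return xcr0;
}
#endif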
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
      x86_xcr0 = X86_XSTATE_SSE_MASK;

#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
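/* Illustrative sketch, not part of the original code: XCR0 is a
   feature bitmask, so the "xcr0 & X86_XSTATE_ALL_MASK" tests above
   amount to checking individual state-component bits.  The low bits
   are architecturally defined (bit 0 = x87, bit 1 = SSE, bit 2 =
   AVX); for example:  */
#if 0
#include <stdint.h>

static int
xcr0_has_avx (uint64_t xcr0)
{
  const uint64_t x87 = 1ull << 0;
  const uint64_t sse = 1ull << 1;
  const uint64_t avx = 1ull << 2;

  /* AVX state is only usable if x87 and SSE state are enabled too.  */
  return (xcr0 & (x87 | sse | avx)) == (x87 | sse | avx);
}
#endif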
/* Update the target descriptions of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */
  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
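/* Illustrative sketch, not part of the original code: the inline
   "lock cmpxchg" loop emitted into the jump pad above implements a
   spin lock over the collecting_t word at LOCKADDR.  In C++11 terms
   it is roughly the following (names are hypothetical):  */
#if 0
#include <atomic>
#include <cstdint>

static void
jump_pad_lock_demo (std::atomic<uintptr_t> *lock, uintptr_t owner)
{
  uintptr_t expected = 0;

  /* cmpxchg succeeds only when the lock word is 0 (unlocked),
     storing our owner token; otherwise reset the expectation and
     spin, like the "test %rax,%rax / jne <again>" sequence.  */
  while (!lock->compare_exchange_weak (expected, owner))
    expected = 0;

  /* ... collect tracepoint data ...  */

  /* Unlock is a plain store of 0, matching the "xor %eax,%eax /
     mov %rax, lockaddr" sequence above.  */
  lock->store (0);
}
#endif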
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
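/* Illustrative sketch, not part of the original code: a use such as
   EMIT_ASM (demo, "pop %rax") expands to roughly the following.  The
   assembled bytes live between the two labels, the leading jmp keeps
   them out of the normal control flow, and add_insns copies them
   into the compiled-code buffer:  */
#if 0
do
  {
    extern unsigned char start_demo, end_demo;
    add_insns (&start_demo, &end_demo - &start_demo);
    __asm__ ("jmp end_demo\n"
	     "\t" "start_demo:"
	     "\t" "pop %rax" "\n"
	     "\t" "end_demo:");
  } while (0);
#endif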
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}