/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
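
/* A note on the raw bytes above: 0xe9 is the x86 "jmp rel32" opcode, so
   jump_insn is a 5-byte jump whose 32-bit displacement gets patched in
   later; the 0x66 operand-size prefix turns it into the 4-byte
   "jmp rel16" form used by small_jump_insn when a 16-bit displacement
   suffices.  */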
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
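
/* For illustration: GDB register 0 for i386 is %eax, so i386_regmap[0]
   is the byte offset of the slot holding EAX in `struct user' -- RAX * 8
   here, because a 64-bit gdbserver transfers a 32-bit inferior's
   registers through the 64-bit `struct user' layout.  */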
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
#define REGSIZE 4

#endif /* __x86_64__ */
/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
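
/* 0xCC is the one-byte "int3" software breakpoint instruction.  */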
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  extended state regions ...
	}

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of the sw_usable_bytes ([464..471]) hold the OS enabled
   extended state mask, which is the same as the extended control register
   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
   together with the mask saved in the xstate_hdr_bytes to determine what
   states the processor/OS supports and what state, used or initialized,
   the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
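
/* For example, when the XSAVE area is read below into an array of
   uint64_t (see x86_linux_read_description), the XCR0 mask at byte
   offset 464 is element 464 / sizeof (uint64_t) == 58 of that array.  */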
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}
/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the number of the syscall that was just trapped.
   This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
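
/* Usage sketch (illustrative, not taken verbatim from the sources):

     unsigned char insn[16];
     int n = push_opcode (insn, "48 89 e6");   // mov %rsp,%rsi

   push_opcode parses the space-separated hex bytes in OP into BUF and
   returns how many bytes were emitted (3 in this example); the jump-pad
   builders below use it to accumulate instruction sequences.  */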
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */
  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
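
/* Illustrative expansion (hypothetical name):

     EMIT_ASM (my_push_rax, "push %rax");

   brackets the inline asm between start_my_push_rax and end_my_push_rax
   labels and copies the bytes between those labels into the inferior at
   current_insn_ptr via add_insns.  */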
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
, int *size_p
)
1973 "cmp %rax,(%rsp)\n\t"
1974 "jne .Lamd64_eq_fallthru\n\t"
1975 "lea 0x8(%rsp),%rsp\n\t"
1977 /* jmp, but don't trust the assembler to choose the right jump */
1978 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1979 ".Lamd64_eq_fallthru:\n\t"
1980 "lea 0x8(%rsp),%rsp\n\t"
1990 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
1993 "cmp %rax,(%rsp)\n\t"
1994 "je .Lamd64_ne_fallthru\n\t"
1995 "lea 0x8(%rsp),%rsp\n\t"
1997 /* jmp, but don't trust the assembler to choose the right jump */
1998 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1999 ".Lamd64_ne_fallthru:\n\t"
2000 "lea 0x8(%rsp),%rsp\n\t"
2010 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2013 "cmp %rax,(%rsp)\n\t"
2014 "jnl .Lamd64_lt_fallthru\n\t"
2015 "lea 0x8(%rsp),%rsp\n\t"
2017 /* jmp, but don't trust the assembler to choose the right jump */
2018 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2019 ".Lamd64_lt_fallthru:\n\t"
2020 "lea 0x8(%rsp),%rsp\n\t"
2030 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2033 "cmp %rax,(%rsp)\n\t"
2034 "jnle .Lamd64_le_fallthru\n\t"
2035 "lea 0x8(%rsp),%rsp\n\t"
2037 /* jmp, but don't trust the assembler to choose the right jump */
2038 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2039 ".Lamd64_le_fallthru:\n\t"
2040 "lea 0x8(%rsp),%rsp\n\t"
2050 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2053 "cmp %rax,(%rsp)\n\t"
2054 "jng .Lamd64_gt_fallthru\n\t"
2055 "lea 0x8(%rsp),%rsp\n\t"
2057 /* jmp, but don't trust the assembler to choose the right jump */
2058 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2059 ".Lamd64_gt_fallthru:\n\t"
2060 "lea 0x8(%rsp),%rsp\n\t"
2070 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2073 "cmp %rax,(%rsp)\n\t"
2074 "jnge .Lamd64_ge_fallthru\n\t"
2075 ".Lamd64_ge_jump:\n\t"
2076 "lea 0x8(%rsp),%rsp\n\t"
2078 /* jmp, but don't trust the assembler to choose the right jump */
2079 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2080 ".Lamd64_ge_fallthru:\n\t"
2081 "lea 0x8(%rsp),%rsp\n\t"
2090 struct emit_ops amd64_emit_ops
=
2092 amd64_emit_prologue
,
2093 amd64_emit_epilogue
,
2098 amd64_emit_rsh_signed
,
2099 amd64_emit_rsh_unsigned
,
2107 amd64_emit_less_signed
,
2108 amd64_emit_less_unsigned
,
2112 amd64_write_goto_address
,
2117 amd64_emit_stack_flush
,
2118 amd64_emit_zero_ext
,
2120 amd64_emit_stack_adjust
,
2121 amd64_emit_int_call_1
,
2122 amd64_emit_void_call_2
,
2131 #endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  initialize_low_tdesc ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}