1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
51 static struct target_desc
*tdesc_amd64_linux_no_xml
;
53 static struct target_desc
*tdesc_i386_linux_no_xml
;
56 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
59 /* Backward compatibility for gdb without XML support. */
61 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
67 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
74 #include <sys/procfs.h>
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
99 class x86_target
: public linux_process_target
103 const regs_info
*get_regs_info () override
;
105 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
107 bool supports_z_point_type (char z_type
) override
;
109 void process_qsupported (char **features
, int count
) override
;
111 bool supports_tracepoints () override
;
115 void low_arch_setup () override
;
117 bool low_cannot_fetch_register (int regno
) override
;
119 bool low_cannot_store_register (int regno
) override
;
121 bool low_supports_breakpoints () override
;
123 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
125 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
127 int low_decr_pc_after_break () override
;
129 bool low_breakpoint_at (CORE_ADDR pc
) override
;
131 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
132 int size
, raw_breakpoint
*bp
) override
;
134 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
135 int size
, raw_breakpoint
*bp
) override
;
137 bool low_stopped_by_watchpoint () override
;
139 CORE_ADDR
low_stopped_data_address () override
;
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
147 int direction
) override
;
149 arch_process_info
*low_new_process () override
;
151 void low_delete_process (arch_process_info
*info
) override
;
153 void low_new_thread (lwp_info
*) override
;
155 void low_delete_thread (arch_lwp_info
*) override
;
157 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
159 void low_prepare_to_resume (lwp_info
*lwp
) override
;
161 int low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
) override
;
165 /* Update all the target description of all processes; a new GDB
166 connected, and it may or not support xml target descriptions. */
167 void update_xmltarget ();
170 /* The singleton target ops object. */
172 static x86_target the_x86_target
;
174 /* Per-process arch-specific data we want to keep. */
176 struct arch_process_info
178 struct x86_debug_reg_state debug_reg_state
;
183 /* Mapping between the general-purpose registers in `struct user'
184 format and GDB's register array layout.
185 Note that the transfer layout uses 64-bit regs. */
186 static /*const*/ int i386_regmap
[] =
188 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
189 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
190 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
191 DS
* 8, ES
* 8, FS
* 8, GS
* 8
194 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
196 /* So code below doesn't have to care, i386 or amd64. */
197 #define ORIG_EAX ORIG_RAX
200 static const int x86_64_regmap
[] =
202 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
203 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
204 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
205 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
206 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
207 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
208 -1, -1, -1, -1, -1, -1, -1, -1,
209 -1, -1, -1, -1, -1, -1, -1, -1,
210 -1, -1, -1, -1, -1, -1, -1, -1,
212 -1, -1, -1, -1, -1, -1, -1, -1,
214 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
219 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
220 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
221 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
226 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
227 -1, -1, -1, -1, -1, -1, -1, -1,
228 -1, -1, -1, -1, -1, -1, -1, -1,
229 -1, -1, -1, -1, -1, -1, -1, -1,
233 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
234 #define X86_64_USER_REGS (GS + 1)
236 #else /* ! __x86_64__ */
238 /* Mapping between the general-purpose registers in `struct user'
239 format and GDB's register array layout. */
240 static /*const*/ int i386_regmap
[] =
242 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
243 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
244 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
245 DS
* 4, ES
* 4, FS
* 4, GS
* 4
248 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
256 /* Returns true if the current inferior belongs to a x86-64 process,
260 is_64bit_tdesc (void)
262 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
264 return register_size (regcache
->tdesc
, 0) == 8;
270 /* Called by libthread_db. */
273 ps_get_thread_area (struct ps_prochandle
*ph
,
274 lwpid_t lwpid
, int idx
, void **base
)
277 int use_64bit
= is_64bit_tdesc ();
284 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
288 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
299 unsigned int desc
[4];
301 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
302 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
305 /* Ensure we properly extend the value to 64-bits for x86_64. */
306 *base
= (void *) (uintptr_t) desc
[1];
311 /* Get the thread area address. This is used to recognize which
312 thread is which when tracing with the in-process agent library. We
313 don't read anything from the address, and treat it as opaque; it's
314 the address itself that we assume is unique per-thread. */
317 x86_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
320 int use_64bit
= is_64bit_tdesc ();
325 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
327 *addr
= (CORE_ADDR
) (uintptr_t) base
;
336 struct lwp_info
*lwp
= find_lwp_pid (ptid_t (lwpid
));
337 struct thread_info
*thr
= get_lwp_thread (lwp
);
338 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
339 unsigned int desc
[4];
341 const int reg_thread_area
= 3; /* bits to scale down register value. */
344 collect_register_by_name (regcache
, "gs", &gs
);
346 idx
= gs
>> reg_thread_area
;
348 if (ptrace (PTRACE_GET_THREAD_AREA
,
350 (void *) (long) idx
, (unsigned long) &desc
) < 0)
361 x86_target::low_cannot_store_register (int regno
)
364 if (is_64bit_tdesc ())
368 return regno
>= I386_NUM_REGS
;
372 x86_target::low_cannot_fetch_register (int regno
)
375 if (is_64bit_tdesc ())
379 return regno
>= I386_NUM_REGS
;
383 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
388 if (register_size (regcache
->tdesc
, 0) == 8)
390 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
391 if (x86_64_regmap
[i
] != -1)
392 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
394 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
397 int lwpid
= lwpid_of (current_thread
);
399 collect_register_by_name (regcache
, "fs_base", &base
);
400 ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_SET_FS
);
402 collect_register_by_name (regcache
, "gs_base", &base
);
403 ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_SET_GS
);
410 /* 32-bit inferior registers need to be zero-extended.
411 Callers would read uninitialized memory otherwise. */
412 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
415 for (i
= 0; i
< I386_NUM_REGS
; i
++)
416 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
418 collect_register_by_name (regcache
, "orig_eax",
419 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
422 /* Sign extend EAX value to avoid potential syscall restart
425 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
426 for a detailed explanation. */
427 if (register_size (regcache
->tdesc
, 0) == 4)
429 void *ptr
= ((gdb_byte
*) buf
430 + i386_regmap
[find_regno (regcache
->tdesc
, "eax")]);
432 *(int64_t *) ptr
= *(int32_t *) ptr
;
438 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
443 if (register_size (regcache
->tdesc
, 0) == 8)
445 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
446 if (x86_64_regmap
[i
] != -1)
447 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
449 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
452 int lwpid
= lwpid_of (current_thread
);
454 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
455 supply_register_by_name (regcache
, "fs_base", &base
);
457 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_GS
) == 0)
458 supply_register_by_name (regcache
, "gs_base", &base
);
465 for (i
= 0; i
< I386_NUM_REGS
; i
++)
466 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
468 supply_register_by_name (regcache
, "orig_eax",
469 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
473 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
476 i387_cache_to_fxsave (regcache
, buf
);
478 i387_cache_to_fsave (regcache
, buf
);
483 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
486 i387_fxsave_to_cache (regcache
, buf
);
488 i387_fsave_to_cache (regcache
, buf
);
495 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
497 i387_cache_to_fxsave (regcache
, buf
);
501 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
503 i387_fxsave_to_cache (regcache
, buf
);
509 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
511 i387_cache_to_xsave (regcache
, buf
);
515 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
517 i387_xsave_to_cache (regcache
, buf
);
520 /* ??? The non-biarch i386 case stores all the i387 regs twice.
521 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
522 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
523 doesn't work. IWBN to avoid the duplication in the case where it
524 does work. Maybe the arch_setup routine could check whether it works
525 and update the supported regsets accordingly. */
527 static struct regset_info x86_regsets
[] =
529 #ifdef HAVE_PTRACE_GETREGS
530 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
532 x86_fill_gregset
, x86_store_gregset
},
533 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
534 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
536 # ifdef HAVE_PTRACE_GETFPXREGS
537 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
539 x86_fill_fpxregset
, x86_store_fpxregset
},
542 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
544 x86_fill_fpregset
, x86_store_fpregset
},
545 #endif /* HAVE_PTRACE_GETREGS */
550 x86_target::low_supports_breakpoints ()
556 x86_target::low_get_pc (regcache
*regcache
)
558 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
564 collect_register_by_name (regcache
, "rip", &pc
);
565 return (CORE_ADDR
) pc
;
571 collect_register_by_name (regcache
, "eip", &pc
);
572 return (CORE_ADDR
) pc
;
577 x86_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
579 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
585 supply_register_by_name (regcache
, "rip", &newpc
);
591 supply_register_by_name (regcache
, "eip", &newpc
);
596 x86_target::low_decr_pc_after_break ()
602 static const gdb_byte x86_breakpoint
[] = { 0xCC };
603 #define x86_breakpoint_len 1
606 x86_target::low_breakpoint_at (CORE_ADDR pc
)
610 read_memory (pc
, &c
, 1);
617 /* Low-level function vector. */
618 struct x86_dr_low_type x86_dr_low
=
620 x86_linux_dr_set_control
,
621 x86_linux_dr_set_addr
,
622 x86_linux_dr_get_addr
,
623 x86_linux_dr_get_status
,
624 x86_linux_dr_get_control
,
628 /* Breakpoint/Watchpoint support. */
631 x86_target::supports_z_point_type (char z_type
)
637 case Z_PACKET_WRITE_WP
:
638 case Z_PACKET_ACCESS_WP
:
646 x86_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
647 int size
, raw_breakpoint
*bp
)
649 struct process_info
*proc
= current_process ();
653 case raw_bkpt_type_hw
:
654 case raw_bkpt_type_write_wp
:
655 case raw_bkpt_type_access_wp
:
657 enum target_hw_bp_type hw_type
658 = raw_bkpt_type_to_target_hw_bp_type (type
);
659 struct x86_debug_reg_state
*state
660 = &proc
->priv
->arch_private
->debug_reg_state
;
662 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
672 x86_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
673 int size
, raw_breakpoint
*bp
)
675 struct process_info
*proc
= current_process ();
679 case raw_bkpt_type_hw
:
680 case raw_bkpt_type_write_wp
:
681 case raw_bkpt_type_access_wp
:
683 enum target_hw_bp_type hw_type
684 = raw_bkpt_type_to_target_hw_bp_type (type
);
685 struct x86_debug_reg_state
*state
686 = &proc
->priv
->arch_private
->debug_reg_state
;
688 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
697 x86_target::low_stopped_by_watchpoint ()
699 struct process_info
*proc
= current_process ();
700 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
704 x86_target::low_stopped_data_address ()
706 struct process_info
*proc
= current_process ();
708 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
714 /* Called when a new process is created. */
717 x86_target::low_new_process ()
719 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
721 x86_low_init_dregs (&info
->debug_reg_state
);
726 /* Called when a process is being deleted. */
729 x86_target::low_delete_process (arch_process_info
*info
)
735 x86_target::low_new_thread (lwp_info
*lwp
)
737 /* This comes from nat/. */
738 x86_linux_new_thread (lwp
);
742 x86_target::low_delete_thread (arch_lwp_info
*alwp
)
744 /* This comes from nat/. */
745 x86_linux_delete_thread (alwp
);
748 /* Target routine for new_fork. */
751 x86_target::low_new_fork (process_info
*parent
, process_info
*child
)
753 /* These are allocated by linux_add_process. */
754 gdb_assert (parent
->priv
!= NULL
755 && parent
->priv
->arch_private
!= NULL
);
756 gdb_assert (child
->priv
!= NULL
757 && child
->priv
->arch_private
!= NULL
);
759 /* Linux kernel before 2.6.33 commit
760 72f674d203cd230426437cdcf7dd6f681dad8b0d
761 will inherit hardware debug registers from parent
762 on fork/vfork/clone. Newer Linux kernels create such tasks with
763 zeroed debug registers.
765 GDB core assumes the child inherits the watchpoints/hw
766 breakpoints of the parent, and will remove them all from the
767 forked off process. Copy the debug registers mirrors into the
768 new process so that all breakpoints and watchpoints can be
769 removed together. The debug registers mirror will become zeroed
770 in the end before detaching the forked off process, thus making
771 this compatible with older Linux kernels too. */
773 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
777 x86_target::low_prepare_to_resume (lwp_info
*lwp
)
779 /* This comes from nat/. */
780 x86_linux_prepare_to_resume (lwp
);
783 /* See nat/x86-dregs.h. */
785 struct x86_debug_reg_state
*
786 x86_debug_reg_state (pid_t pid
)
788 struct process_info
*proc
= find_process_pid (pid
);
790 return &proc
->priv
->arch_private
->debug_reg_state
;
793 /* When GDBSERVER is built as a 64-bit application on linux, the
794 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
795 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
796 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
797 conversion in-place ourselves. */
799 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
800 layout of the inferiors' architecture. Returns true if any
801 conversion was done; false otherwise. If DIRECTION is 1, then copy
802 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
806 x86_target::low_siginfo_fixup (siginfo_t
*ptrace
, gdb_byte
*inf
, int direction
)
809 unsigned int machine
;
810 int tid
= lwpid_of (current_thread
);
811 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
813 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
814 if (!is_64bit_tdesc ())
815 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
817 /* No fixup for native x32 GDB. */
818 else if (!is_elf64
&& sizeof (void *) == 8)
819 return amd64_linux_siginfo_fixup_common (ptrace
, inf
, direction
,
828 /* Format of XSAVE extended state is:
832 sw_usable_bytes[464..511]
833 xstate_hdr_bytes[512..575]
838 Same memory layout will be used for the coredump NT_X86_XSTATE
839 representing the XSAVE extended state registers.
841 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
842 extended state mask, which is the same as the extended control register
843 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
844 together with the mask saved in the xstate_hdr_bytes to determine what
845 states the processor/OS supports and what state, used or initialized,
846 the process/thread is in. */
847 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
849 /* Does the current host support the GETFPXREGS request? The header
850 file may or may not define it, and even if it is defined, the
851 kernel will return EIO if it's running on a pre-SSE processor. */
852 int have_ptrace_getfpxregs
=
853 #ifdef HAVE_PTRACE_GETFPXREGS
860 /* Get Linux/x86 target description from running target. */
862 static const struct target_desc
*
863 x86_linux_read_description (void)
865 unsigned int machine
;
869 static uint64_t xcr0
;
870 struct regset_info
*regset
;
872 tid
= lwpid_of (current_thread
);
874 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
876 if (sizeof (void *) == 4)
879 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
881 else if (machine
== EM_X86_64
)
882 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
886 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
887 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
889 elf_fpxregset_t fpxregs
;
891 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
893 have_ptrace_getfpxregs
= 0;
894 have_ptrace_getregset
= 0;
895 return i386_linux_read_description (X86_XSTATE_X87
);
898 have_ptrace_getfpxregs
= 1;
904 x86_xcr0
= X86_XSTATE_SSE_MASK
;
908 if (machine
== EM_X86_64
)
909 return tdesc_amd64_linux_no_xml
;
912 return tdesc_i386_linux_no_xml
;
915 if (have_ptrace_getregset
== -1)
917 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
920 iov
.iov_base
= xstateregs
;
921 iov
.iov_len
= sizeof (xstateregs
);
923 /* Check if PTRACE_GETREGSET works. */
924 if (ptrace (PTRACE_GETREGSET
, tid
,
925 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
926 have_ptrace_getregset
= 0;
929 have_ptrace_getregset
= 1;
931 /* Get XCR0 from XSAVE extended state. */
932 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
933 / sizeof (uint64_t))];
935 /* Use PTRACE_GETREGSET if it is available. */
936 for (regset
= x86_regsets
;
937 regset
->fill_function
!= NULL
; regset
++)
938 if (regset
->get_request
== PTRACE_GETREGSET
)
939 regset
->size
= X86_XSTATE_SIZE (xcr0
);
940 else if (regset
->type
!= GENERAL_REGS
)
945 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
946 xcr0_features
= (have_ptrace_getregset
947 && (xcr0
& X86_XSTATE_ALL_MASK
));
952 if (machine
== EM_X86_64
)
955 const target_desc
*tdesc
= NULL
;
959 tdesc
= amd64_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
,
964 tdesc
= amd64_linux_read_description (X86_XSTATE_SSE_MASK
, !is_elf64
);
970 const target_desc
*tdesc
= NULL
;
973 tdesc
= i386_linux_read_description (xcr0
& X86_XSTATE_ALL_MASK
);
976 tdesc
= i386_linux_read_description (X86_XSTATE_SSE
);
981 gdb_assert_not_reached ("failed to return tdesc");
984 /* Update all the target description of all processes; a new GDB
985 connected, and it may or not support xml target descriptions. */
988 x86_target::update_xmltarget ()
990 struct thread_info
*saved_thread
= current_thread
;
992 /* Before changing the register cache's internal layout, flush the
993 contents of the current valid caches back to the threads, and
994 release the current regcache objects. */
997 for_each_process ([this] (process_info
*proc
) {
1000 /* Look up any thread of this process. */
1001 current_thread
= find_any_thread_of_pid (pid
);
1006 current_thread
= saved_thread
;
1009 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1010 PTRACE_GETREGSET. */
1013 x86_target::process_qsupported (char **features
, int count
)
1017 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1018 with "i386" in qSupported query, it supports x86 XML target
1021 for (i
= 0; i
< count
; i
++)
1023 const char *feature
= features
[i
];
1025 if (startswith (feature
, "xmlRegisters="))
1027 char *copy
= xstrdup (feature
+ 13);
1030 for (char *p
= strtok_r (copy
, ",", &saveptr
);
1032 p
= strtok_r (NULL
, ",", &saveptr
))
1034 if (strcmp (p
, "i386") == 0)
1044 update_xmltarget ();
1047 /* Common for x86/x86-64. */
1049 static struct regsets_info x86_regsets_info
=
1051 x86_regsets
, /* regsets */
1052 0, /* num_regsets */
1053 NULL
, /* disabled_regsets */
1057 static struct regs_info amd64_linux_regs_info
=
1059 NULL
, /* regset_bitmap */
1060 NULL
, /* usrregs_info */
1064 static struct usrregs_info i386_linux_usrregs_info
=
1070 static struct regs_info i386_linux_regs_info
=
1072 NULL
, /* regset_bitmap */
1073 &i386_linux_usrregs_info
,
1078 x86_target::get_regs_info ()
1081 if (is_64bit_tdesc ())
1082 return &amd64_linux_regs_info
;
1085 return &i386_linux_regs_info
;
1088 /* Initialize the target description for the architecture of the
1092 x86_target::low_arch_setup ()
1094 current_process ()->tdesc
= x86_linux_read_description ();
1097 /* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1098 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1101 x86_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
1103 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
1109 collect_register_by_name (regcache
, "orig_rax", &l_sysno
);
1110 *sysno
= (int) l_sysno
;
1113 collect_register_by_name (regcache
, "orig_eax", sysno
);
1117 x86_target::supports_tracepoints ()
1123 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1125 target_write_memory (*to
, buf
, len
);
/* Decode OP, a string of whitespace-separated hex byte values, into
   BUF.  Return the number of bytes written.  Parsing stops at the
   first token that is not a hex number.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *start = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* No progress means no more hex tokens.  */
      if (end == op)
	break;

      *buf++ = byte;
      op = end;
    }

  return buf - start;
}
1151 /* Build a jump pad that saves registers and calls a collection
1152 function. Writes a jump instruction to the jump pad to
1153 JJUMPAD_INSN. The caller is responsible to write it in at the
1154 tracepoint address. */
1157 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1158 CORE_ADDR collector
,
1161 CORE_ADDR
*jump_entry
,
1162 CORE_ADDR
*trampoline
,
1163 ULONGEST
*trampoline_size
,
1164 unsigned char *jjump_pad_insn
,
1165 ULONGEST
*jjump_pad_insn_size
,
1166 CORE_ADDR
*adjusted_insn_addr
,
1167 CORE_ADDR
*adjusted_insn_addr_end
,
1170 unsigned char buf
[40];
1174 CORE_ADDR buildaddr
= *jump_entry
;
1176 /* Build the jump pad. */
1178 /* First, do tracepoint data collection. Save registers. */
1180 /* Need to ensure stack pointer saved first. */
1181 buf
[i
++] = 0x54; /* push %rsp */
1182 buf
[i
++] = 0x55; /* push %rbp */
1183 buf
[i
++] = 0x57; /* push %rdi */
1184 buf
[i
++] = 0x56; /* push %rsi */
1185 buf
[i
++] = 0x52; /* push %rdx */
1186 buf
[i
++] = 0x51; /* push %rcx */
1187 buf
[i
++] = 0x53; /* push %rbx */
1188 buf
[i
++] = 0x50; /* push %rax */
1189 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1190 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1191 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1192 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1193 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1194 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1195 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1196 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1197 buf
[i
++] = 0x9c; /* pushfq */
1198 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1200 memcpy (buf
+ i
, &tpaddr
, 8);
1202 buf
[i
++] = 0x57; /* push %rdi */
1203 append_insns (&buildaddr
, i
, buf
);
1205 /* Stack space for the collecting_t object. */
1207 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1208 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1209 memcpy (buf
+ i
, &tpoint
, 8);
1211 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1212 i
+= push_opcode (&buf
[i
],
1213 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1214 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1215 append_insns (&buildaddr
, i
, buf
);
1219 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1220 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1222 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1223 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1224 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1225 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1226 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1227 append_insns (&buildaddr
, i
, buf
);
1229 /* Set up the gdb_collect call. */
1230 /* At this point, (stack pointer + 0x18) is the base of our saved
1234 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1235 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1237 /* tpoint address may be 64-bit wide. */
1238 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1239 memcpy (buf
+ i
, &tpoint
, 8);
1241 append_insns (&buildaddr
, i
, buf
);
1243 /* The collector function being in the shared library, may be
1244 >31-bits away off the jump pad. */
1246 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1247 memcpy (buf
+ i
, &collector
, 8);
1249 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1250 append_insns (&buildaddr
, i
, buf
);
1252 /* Clear the spin-lock. */
1254 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1255 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1256 memcpy (buf
+ i
, &lockaddr
, 8);
1258 append_insns (&buildaddr
, i
, buf
);
1260 /* Remove stack that had been used for the collect_t object. */
1262 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1263 append_insns (&buildaddr
, i
, buf
);
1265 /* Restore register state. */
1267 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1271 buf
[i
++] = 0x9d; /* popfq */
1272 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1273 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1274 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1275 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1276 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1277 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1278 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1279 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1280 buf
[i
++] = 0x58; /* pop %rax */
1281 buf
[i
++] = 0x5b; /* pop %rbx */
1282 buf
[i
++] = 0x59; /* pop %rcx */
1283 buf
[i
++] = 0x5a; /* pop %rdx */
1284 buf
[i
++] = 0x5e; /* pop %rsi */
1285 buf
[i
++] = 0x5f; /* pop %rdi */
1286 buf
[i
++] = 0x5d; /* pop %rbp */
1287 buf
[i
++] = 0x5c; /* pop %rsp */
1288 append_insns (&buildaddr
, i
, buf
);
1290 /* Now, adjust the original instruction to execute in the jump
1292 *adjusted_insn_addr
= buildaddr
;
1293 relocate_instruction (&buildaddr
, tpaddr
);
1294 *adjusted_insn_addr_end
= buildaddr
;
1296 /* Finally, write a jump back to the program. */
1298 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1299 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1302 "E.Jump back from jump pad too far from tracepoint "
1303 "(offset 0x%" PRIx64
" > int32).", loffset
);
1307 offset
= (int) loffset
;
1308 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1309 memcpy (buf
+ 1, &offset
, 4);
1310 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1312 /* The jump pad is now built. Wire in a jump to our jump pad. This
1313 is always done last (by our caller actually), so that we can
1314 install fast tracepoints with threads running. This relies on
1315 the agent's atomic write support. */
1316 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1317 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1320 "E.Jump pad too far from tracepoint "
1321 "(offset 0x%" PRIx64
" > int32).", loffset
);
1325 offset
= (int) loffset
;
1327 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1328 memcpy (buf
+ 1, &offset
, 4);
1329 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1330 *jjump_pad_insn_size
= sizeof (jump_insn
);
1332 /* Return the end address of our pad. */
1333 *jump_entry
= buildaddr
;
1338 #endif /* __x86_64__ */
1340 /* Build a jump pad that saves registers and calls a collection
1341 function. Writes a jump instruction to the jump pad to
1342 JJUMPAD_INSN. The caller is responsible to write it in at the
1343 tracepoint address. */
1346 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1347 CORE_ADDR collector
,
1350 CORE_ADDR
*jump_entry
,
1351 CORE_ADDR
*trampoline
,
1352 ULONGEST
*trampoline_size
,
1353 unsigned char *jjump_pad_insn
,
1354 ULONGEST
*jjump_pad_insn_size
,
1355 CORE_ADDR
*adjusted_insn_addr
,
1356 CORE_ADDR
*adjusted_insn_addr_end
,
1359 unsigned char buf
[0x100];
1361 CORE_ADDR buildaddr
= *jump_entry
;
1363 /* Build the jump pad. */
1365 /* First, do tracepoint data collection. Save registers. */
1367 buf
[i
++] = 0x60; /* pushad */
1368 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1369 *((int *)(buf
+ i
)) = (int) tpaddr
;
1371 buf
[i
++] = 0x9c; /* pushf */
1372 buf
[i
++] = 0x1e; /* push %ds */
1373 buf
[i
++] = 0x06; /* push %es */
1374 buf
[i
++] = 0x0f; /* push %fs */
1376 buf
[i
++] = 0x0f; /* push %gs */
1378 buf
[i
++] = 0x16; /* push %ss */
1379 buf
[i
++] = 0x0e; /* push %cs */
1380 append_insns (&buildaddr
, i
, buf
);
1382 /* Stack space for the collecting_t object. */
1384 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1386 /* Build the object. */
1387 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1388 memcpy (buf
+ i
, &tpoint
, 4);
1390 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1392 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1393 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1394 append_insns (&buildaddr
, i
, buf
);
1396 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1397 If we cared for it, this could be using xchg alternatively. */
1400 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1401 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1403 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1405 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1406 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1407 append_insns (&buildaddr
, i
, buf
);
1410 /* Set up arguments to the gdb_collect call. */
1412 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1413 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1414 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1415 append_insns (&buildaddr
, i
, buf
);
1418 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1419 append_insns (&buildaddr
, i
, buf
);
1422 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1423 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1425 append_insns (&buildaddr
, i
, buf
);
1427 buf
[0] = 0xe8; /* call <reladdr> */
1428 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1429 memcpy (buf
+ 1, &offset
, 4);
1430 append_insns (&buildaddr
, 5, buf
);
1431 /* Clean up after the call. */
1432 buf
[0] = 0x83; /* add $0x8,%esp */
1435 append_insns (&buildaddr
, 3, buf
);
1438 /* Clear the spin-lock. This would need the LOCK prefix on older
1441 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1442 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1443 memcpy (buf
+ i
, &lockaddr
, 4);
1445 append_insns (&buildaddr
, i
, buf
);
1448 /* Remove stack that had been used for the collect_t object. */
1450 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1451 append_insns (&buildaddr
, i
, buf
);
1454 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1457 buf
[i
++] = 0x17; /* pop %ss */
1458 buf
[i
++] = 0x0f; /* pop %gs */
1460 buf
[i
++] = 0x0f; /* pop %fs */
1462 buf
[i
++] = 0x07; /* pop %es */
1463 buf
[i
++] = 0x1f; /* pop %ds */
1464 buf
[i
++] = 0x9d; /* popf */
1465 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1468 buf
[i
++] = 0x61; /* popad */
1469 append_insns (&buildaddr
, i
, buf
);
1471 /* Now, adjust the original instruction to execute in the jump
1473 *adjusted_insn_addr
= buildaddr
;
1474 relocate_instruction (&buildaddr
, tpaddr
);
1475 *adjusted_insn_addr_end
= buildaddr
;
1477 /* Write the jump back to the program. */
1478 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1479 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1480 memcpy (buf
+ 1, &offset
, 4);
1481 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1483 /* The jump pad is now built. Wire in a jump to our jump pad. This
1484 is always done last (by our caller actually), so that we can
1485 install fast tracepoints with threads running. This relies on
1486 the agent's atomic write support. */
1489 /* Create a trampoline. */
1490 *trampoline_size
= sizeof (jump_insn
);
1491 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1493 /* No trampoline space available. */
1495 "E.Cannot allocate trampoline space needed for fast "
1496 "tracepoints on 4-byte instructions.");
1500 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1501 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1502 memcpy (buf
+ 1, &offset
, 4);
1503 target_write_memory (*trampoline
, buf
, sizeof (jump_insn
));
1505 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1506 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1507 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1508 memcpy (buf
+ 2, &offset
, 2);
1509 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1510 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1514 /* Else use a 32-bit relative jump instruction. */
1515 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1516 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1517 memcpy (buf
+ 1, &offset
, 4);
1518 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1519 *jjump_pad_insn_size
= sizeof (jump_insn
);
1522 /* Return the end address of our pad. */
1523 *jump_entry
= buildaddr
;
1529 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1530 CORE_ADDR collector
,
1533 CORE_ADDR
*jump_entry
,
1534 CORE_ADDR
*trampoline
,
1535 ULONGEST
*trampoline_size
,
1536 unsigned char *jjump_pad_insn
,
1537 ULONGEST
*jjump_pad_insn_size
,
1538 CORE_ADDR
*adjusted_insn_addr
,
1539 CORE_ADDR
*adjusted_insn_addr_end
,
1543 if (is_64bit_tdesc ())
1544 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1545 collector
, lockaddr
,
1546 orig_size
, jump_entry
,
1547 trampoline
, trampoline_size
,
1549 jjump_pad_insn_size
,
1551 adjusted_insn_addr_end
,
1555 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1556 collector
, lockaddr
,
1557 orig_size
, jump_entry
,
1558 trampoline
, trampoline_size
,
1560 jjump_pad_insn_size
,
1562 adjusted_insn_addr_end
,
1566 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1570 x86_get_min_fast_tracepoint_insn_len (void)
1572 static int warned_about_fast_tracepoints
= 0;
1575 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1576 used for fast tracepoints. */
1577 if (is_64bit_tdesc ())
1581 if (agent_loaded_p ())
1583 char errbuf
[IPA_BUFSIZ
];
1587 /* On x86, if trampolines are available, then 4-byte jump instructions
1588 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1589 with a 4-byte offset are used instead. */
1590 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1594 /* GDB has no channel to explain to user why a shorter fast
1595 tracepoint is not possible, but at least make GDBserver
1596 mention that something has gone awry. */
1597 if (!warned_about_fast_tracepoints
)
1599 warning ("4-byte fast tracepoints not available; %s", errbuf
);
1600 warned_about_fast_tracepoints
= 1;
1607 /* Indicate that the minimum length is currently unknown since the IPA
1608 has not loaded yet. */
1614 add_insns (unsigned char *start
, int len
)
1616 CORE_ADDR buildaddr
= current_insn_ptr
;
1619 debug_printf ("Adding %d bytes of insn at %s\n",
1620 len
, paddress (buildaddr
));
1622 append_insns (&buildaddr
, len
, start
);
1623 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1667 amd64_emit_prologue (void)
1669 EMIT_ASM (amd64_prologue
,
1671 "movq %rsp,%rbp\n\t"
1672 "sub $0x20,%rsp\n\t"
1673 "movq %rdi,-8(%rbp)\n\t"
1674 "movq %rsi,-16(%rbp)");
1679 amd64_emit_epilogue (void)
1681 EMIT_ASM (amd64_epilogue
,
1682 "movq -16(%rbp),%rdi\n\t"
1683 "movq %rax,(%rdi)\n\t"
1690 amd64_emit_add (void)
1692 EMIT_ASM (amd64_add
,
1693 "add (%rsp),%rax\n\t"
1694 "lea 0x8(%rsp),%rsp");
1698 amd64_emit_sub (void)
1700 EMIT_ASM (amd64_sub
,
1701 "sub %rax,(%rsp)\n\t"
1706 amd64_emit_mul (void)
1712 amd64_emit_lsh (void)
1718 amd64_emit_rsh_signed (void)
1724 amd64_emit_rsh_unsigned (void)
1730 amd64_emit_ext (int arg
)
1735 EMIT_ASM (amd64_ext_8
,
1741 EMIT_ASM (amd64_ext_16
,
1746 EMIT_ASM (amd64_ext_32
,
1755 amd64_emit_log_not (void)
1757 EMIT_ASM (amd64_log_not
,
1758 "test %rax,%rax\n\t"
1764 amd64_emit_bit_and (void)
1766 EMIT_ASM (amd64_and
,
1767 "and (%rsp),%rax\n\t"
1768 "lea 0x8(%rsp),%rsp");
1772 amd64_emit_bit_or (void)
1775 "or (%rsp),%rax\n\t"
1776 "lea 0x8(%rsp),%rsp");
1780 amd64_emit_bit_xor (void)
1782 EMIT_ASM (amd64_xor
,
1783 "xor (%rsp),%rax\n\t"
1784 "lea 0x8(%rsp),%rsp");
1788 amd64_emit_bit_not (void)
1790 EMIT_ASM (amd64_bit_not
,
1791 "xorq $0xffffffffffffffff,%rax");
1795 amd64_emit_equal (void)
1797 EMIT_ASM (amd64_equal
,
1798 "cmp %rax,(%rsp)\n\t"
1799 "je .Lamd64_equal_true\n\t"
1801 "jmp .Lamd64_equal_end\n\t"
1802 ".Lamd64_equal_true:\n\t"
1804 ".Lamd64_equal_end:\n\t"
1805 "lea 0x8(%rsp),%rsp");
1809 amd64_emit_less_signed (void)
1811 EMIT_ASM (amd64_less_signed
,
1812 "cmp %rax,(%rsp)\n\t"
1813 "jl .Lamd64_less_signed_true\n\t"
1815 "jmp .Lamd64_less_signed_end\n\t"
1816 ".Lamd64_less_signed_true:\n\t"
1818 ".Lamd64_less_signed_end:\n\t"
1819 "lea 0x8(%rsp),%rsp");
1823 amd64_emit_less_unsigned (void)
1825 EMIT_ASM (amd64_less_unsigned
,
1826 "cmp %rax,(%rsp)\n\t"
1827 "jb .Lamd64_less_unsigned_true\n\t"
1829 "jmp .Lamd64_less_unsigned_end\n\t"
1830 ".Lamd64_less_unsigned_true:\n\t"
1832 ".Lamd64_less_unsigned_end:\n\t"
1833 "lea 0x8(%rsp),%rsp");
1837 amd64_emit_ref (int size
)
1842 EMIT_ASM (amd64_ref1
,
1846 EMIT_ASM (amd64_ref2
,
1850 EMIT_ASM (amd64_ref4
,
1851 "movl (%rax),%eax");
1854 EMIT_ASM (amd64_ref8
,
1855 "movq (%rax),%rax");
1861 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1863 EMIT_ASM (amd64_if_goto
,
1867 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1875 amd64_emit_goto (int *offset_p
, int *size_p
)
1877 EMIT_ASM (amd64_goto
,
1878 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1886 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1888 int diff
= (to
- (from
+ size
));
1889 unsigned char buf
[sizeof (int)];
1897 memcpy (buf
, &diff
, sizeof (int));
1898 target_write_memory (from
, buf
, sizeof (int));
1902 amd64_emit_const (LONGEST num
)
1904 unsigned char buf
[16];
1906 CORE_ADDR buildaddr
= current_insn_ptr
;
1909 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1910 memcpy (&buf
[i
], &num
, sizeof (num
));
1912 append_insns (&buildaddr
, i
, buf
);
1913 current_insn_ptr
= buildaddr
;
1917 amd64_emit_call (CORE_ADDR fn
)
1919 unsigned char buf
[16];
1921 CORE_ADDR buildaddr
;
1924 /* The destination function being in the shared library, may be
1925 >31-bits away off the compiled code pad. */
1927 buildaddr
= current_insn_ptr
;
1929 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1933 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1935 /* Offset is too large for a call. Use callq, but that requires
1936 a register, so avoid it if possible. Use r10, since it is
1937 call-clobbered, we don't have to push/pop it. */
1938 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1940 memcpy (buf
+ i
, &fn
, 8);
1942 buf
[i
++] = 0xff; /* callq *%r10 */
1947 int offset32
= offset64
; /* we know we can't overflow here. */
1949 buf
[i
++] = 0xe8; /* call <reladdr> */
1950 memcpy (buf
+ i
, &offset32
, 4);
1954 append_insns (&buildaddr
, i
, buf
);
1955 current_insn_ptr
= buildaddr
;
1959 amd64_emit_reg (int reg
)
1961 unsigned char buf
[16];
1963 CORE_ADDR buildaddr
;
1965 /* Assume raw_regs is still in %rdi. */
1966 buildaddr
= current_insn_ptr
;
1968 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1969 memcpy (&buf
[i
], ®
, sizeof (reg
));
1971 append_insns (&buildaddr
, i
, buf
);
1972 current_insn_ptr
= buildaddr
;
1973 amd64_emit_call (get_raw_reg_func_addr ());
1977 amd64_emit_pop (void)
1979 EMIT_ASM (amd64_pop
,
1984 amd64_emit_stack_flush (void)
1986 EMIT_ASM (amd64_stack_flush
,
1991 amd64_emit_zero_ext (int arg
)
1996 EMIT_ASM (amd64_zero_ext_8
,
2000 EMIT_ASM (amd64_zero_ext_16
,
2001 "and $0xffff,%rax");
2004 EMIT_ASM (amd64_zero_ext_32
,
2005 "mov $0xffffffff,%rcx\n\t"
2014 amd64_emit_swap (void)
2016 EMIT_ASM (amd64_swap
,
2023 amd64_emit_stack_adjust (int n
)
2025 unsigned char buf
[16];
2027 CORE_ADDR buildaddr
= current_insn_ptr
;
2030 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2034 /* This only handles adjustments up to 16, but we don't expect any more. */
2036 append_insns (&buildaddr
, i
, buf
);
2037 current_insn_ptr
= buildaddr
;
2040 /* FN's prototype is `LONGEST(*fn)(int)'. */
2043 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2045 unsigned char buf
[16];
2047 CORE_ADDR buildaddr
;
2049 buildaddr
= current_insn_ptr
;
2051 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2052 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2054 append_insns (&buildaddr
, i
, buf
);
2055 current_insn_ptr
= buildaddr
;
2056 amd64_emit_call (fn
);
2059 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2062 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2064 unsigned char buf
[16];
2066 CORE_ADDR buildaddr
;
2068 buildaddr
= current_insn_ptr
;
2070 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2071 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2073 append_insns (&buildaddr
, i
, buf
);
2074 current_insn_ptr
= buildaddr
;
2075 EMIT_ASM (amd64_void_call_2_a
,
2076 /* Save away a copy of the stack top. */
2078 /* Also pass top as the second argument. */
2080 amd64_emit_call (fn
);
2081 EMIT_ASM (amd64_void_call_2_b
,
2082 /* Restore the stack top, %rax may have been trashed. */
2087 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2090 "cmp %rax,(%rsp)\n\t"
2091 "jne .Lamd64_eq_fallthru\n\t"
2092 "lea 0x8(%rsp),%rsp\n\t"
2094 /* jmp, but don't trust the assembler to choose the right jump */
2095 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2096 ".Lamd64_eq_fallthru:\n\t"
2097 "lea 0x8(%rsp),%rsp\n\t"
2107 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2110 "cmp %rax,(%rsp)\n\t"
2111 "je .Lamd64_ne_fallthru\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2114 /* jmp, but don't trust the assembler to choose the right jump */
2115 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2116 ".Lamd64_ne_fallthru:\n\t"
2117 "lea 0x8(%rsp),%rsp\n\t"
2127 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2130 "cmp %rax,(%rsp)\n\t"
2131 "jnl .Lamd64_lt_fallthru\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2134 /* jmp, but don't trust the assembler to choose the right jump */
2135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2136 ".Lamd64_lt_fallthru:\n\t"
2137 "lea 0x8(%rsp),%rsp\n\t"
2147 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2150 "cmp %rax,(%rsp)\n\t"
2151 "jnle .Lamd64_le_fallthru\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2154 /* jmp, but don't trust the assembler to choose the right jump */
2155 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2156 ".Lamd64_le_fallthru:\n\t"
2157 "lea 0x8(%rsp),%rsp\n\t"
2167 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2170 "cmp %rax,(%rsp)\n\t"
2171 "jng .Lamd64_gt_fallthru\n\t"
2172 "lea 0x8(%rsp),%rsp\n\t"
2174 /* jmp, but don't trust the assembler to choose the right jump */
2175 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2176 ".Lamd64_gt_fallthru:\n\t"
2177 "lea 0x8(%rsp),%rsp\n\t"
2187 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2190 "cmp %rax,(%rsp)\n\t"
2191 "jnge .Lamd64_ge_fallthru\n\t"
2192 ".Lamd64_ge_jump:\n\t"
2193 "lea 0x8(%rsp),%rsp\n\t"
2195 /* jmp, but don't trust the assembler to choose the right jump */
2196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2197 ".Lamd64_ge_fallthru:\n\t"
2198 "lea 0x8(%rsp),%rsp\n\t"
2207 struct emit_ops amd64_emit_ops
=
2209 amd64_emit_prologue
,
2210 amd64_emit_epilogue
,
2215 amd64_emit_rsh_signed
,
2216 amd64_emit_rsh_unsigned
,
2224 amd64_emit_less_signed
,
2225 amd64_emit_less_unsigned
,
2229 amd64_write_goto_address
,
2234 amd64_emit_stack_flush
,
2235 amd64_emit_zero_ext
,
2237 amd64_emit_stack_adjust
,
2238 amd64_emit_int_call_1
,
2239 amd64_emit_void_call_2
,
2248 #endif /* __x86_64__ */
2251 i386_emit_prologue (void)
2253 EMIT_ASM32 (i386_prologue
,
2257 /* At this point, the raw regs base address is at 8(%ebp), and the
2258 value pointer is at 12(%ebp). */
2262 i386_emit_epilogue (void)
2264 EMIT_ASM32 (i386_epilogue
,
2265 "mov 12(%ebp),%ecx\n\t"
2266 "mov %eax,(%ecx)\n\t"
2267 "mov %ebx,0x4(%ecx)\n\t"
2275 i386_emit_add (void)
2277 EMIT_ASM32 (i386_add
,
2278 "add (%esp),%eax\n\t"
2279 "adc 0x4(%esp),%ebx\n\t"
2280 "lea 0x8(%esp),%esp");
2284 i386_emit_sub (void)
2286 EMIT_ASM32 (i386_sub
,
2287 "subl %eax,(%esp)\n\t"
2288 "sbbl %ebx,4(%esp)\n\t"
2294 i386_emit_mul (void)
2300 i386_emit_lsh (void)
2306 i386_emit_rsh_signed (void)
2312 i386_emit_rsh_unsigned (void)
2318 i386_emit_ext (int arg
)
2323 EMIT_ASM32 (i386_ext_8
,
2326 "movl %eax,%ebx\n\t"
2330 EMIT_ASM32 (i386_ext_16
,
2332 "movl %eax,%ebx\n\t"
2336 EMIT_ASM32 (i386_ext_32
,
2337 "movl %eax,%ebx\n\t"
2346 i386_emit_log_not (void)
2348 EMIT_ASM32 (i386_log_not
,
2350 "test %eax,%eax\n\t"
2357 i386_emit_bit_and (void)
2359 EMIT_ASM32 (i386_and
,
2360 "and (%esp),%eax\n\t"
2361 "and 0x4(%esp),%ebx\n\t"
2362 "lea 0x8(%esp),%esp");
2366 i386_emit_bit_or (void)
2368 EMIT_ASM32 (i386_or
,
2369 "or (%esp),%eax\n\t"
2370 "or 0x4(%esp),%ebx\n\t"
2371 "lea 0x8(%esp),%esp");
2375 i386_emit_bit_xor (void)
2377 EMIT_ASM32 (i386_xor
,
2378 "xor (%esp),%eax\n\t"
2379 "xor 0x4(%esp),%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2384 i386_emit_bit_not (void)
2386 EMIT_ASM32 (i386_bit_not
,
2387 "xor $0xffffffff,%eax\n\t"
2388 "xor $0xffffffff,%ebx\n\t");
2392 i386_emit_equal (void)
2394 EMIT_ASM32 (i386_equal
,
2395 "cmpl %ebx,4(%esp)\n\t"
2396 "jne .Li386_equal_false\n\t"
2397 "cmpl %eax,(%esp)\n\t"
2398 "je .Li386_equal_true\n\t"
2399 ".Li386_equal_false:\n\t"
2401 "jmp .Li386_equal_end\n\t"
2402 ".Li386_equal_true:\n\t"
2404 ".Li386_equal_end:\n\t"
2406 "lea 0x8(%esp),%esp");
2410 i386_emit_less_signed (void)
2412 EMIT_ASM32 (i386_less_signed
,
2413 "cmpl %ebx,4(%esp)\n\t"
2414 "jl .Li386_less_signed_true\n\t"
2415 "jne .Li386_less_signed_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "jl .Li386_less_signed_true\n\t"
2418 ".Li386_less_signed_false:\n\t"
2420 "jmp .Li386_less_signed_end\n\t"
2421 ".Li386_less_signed_true:\n\t"
2423 ".Li386_less_signed_end:\n\t"
2425 "lea 0x8(%esp),%esp");
2429 i386_emit_less_unsigned (void)
2431 EMIT_ASM32 (i386_less_unsigned
,
2432 "cmpl %ebx,4(%esp)\n\t"
2433 "jb .Li386_less_unsigned_true\n\t"
2434 "jne .Li386_less_unsigned_false\n\t"
2435 "cmpl %eax,(%esp)\n\t"
2436 "jb .Li386_less_unsigned_true\n\t"
2437 ".Li386_less_unsigned_false:\n\t"
2439 "jmp .Li386_less_unsigned_end\n\t"
2440 ".Li386_less_unsigned_true:\n\t"
2442 ".Li386_less_unsigned_end:\n\t"
2444 "lea 0x8(%esp),%esp");
2448 i386_emit_ref (int size
)
2453 EMIT_ASM32 (i386_ref1
,
2457 EMIT_ASM32 (i386_ref2
,
2461 EMIT_ASM32 (i386_ref4
,
2462 "movl (%eax),%eax");
2465 EMIT_ASM32 (i386_ref8
,
2466 "movl 4(%eax),%ebx\n\t"
2467 "movl (%eax),%eax");
2473 i386_emit_if_goto (int *offset_p
, int *size_p
)
2475 EMIT_ASM32 (i386_if_goto
,
2481 /* Don't trust the assembler to choose the right jump */
2482 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2485 *offset_p
= 11; /* be sure that this matches the sequence above */
2491 i386_emit_goto (int *offset_p
, int *size_p
)
2493 EMIT_ASM32 (i386_goto
,
2494 /* Don't trust the assembler to choose the right jump */
2495 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2503 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2505 int diff
= (to
- (from
+ size
));
2506 unsigned char buf
[sizeof (int)];
2508 /* We're only doing 4-byte sizes at the moment. */
2515 memcpy (buf
, &diff
, sizeof (int));
2516 target_write_memory (from
, buf
, sizeof (int));
2520 i386_emit_const (LONGEST num
)
2522 unsigned char buf
[16];
2524 CORE_ADDR buildaddr
= current_insn_ptr
;
2527 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2528 lo
= num
& 0xffffffff;
2529 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2531 hi
= ((num
>> 32) & 0xffffffff);
2534 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2535 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2540 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2542 append_insns (&buildaddr
, i
, buf
);
2543 current_insn_ptr
= buildaddr
;
2547 i386_emit_call (CORE_ADDR fn
)
2549 unsigned char buf
[16];
2551 CORE_ADDR buildaddr
;
2553 buildaddr
= current_insn_ptr
;
2555 buf
[i
++] = 0xe8; /* call <reladdr> */
2556 offset
= ((int) fn
) - (buildaddr
+ 5);
2557 memcpy (buf
+ 1, &offset
, 4);
2558 append_insns (&buildaddr
, 5, buf
);
2559 current_insn_ptr
= buildaddr
;
2563 i386_emit_reg (int reg
)
2565 unsigned char buf
[16];
2567 CORE_ADDR buildaddr
;
2569 EMIT_ASM32 (i386_reg_a
,
2571 buildaddr
= current_insn_ptr
;
2573 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2574 memcpy (&buf
[i
], ®
, sizeof (reg
));
2576 append_insns (&buildaddr
, i
, buf
);
2577 current_insn_ptr
= buildaddr
;
2578 EMIT_ASM32 (i386_reg_b
,
2579 "mov %eax,4(%esp)\n\t"
2580 "mov 8(%ebp),%eax\n\t"
2582 i386_emit_call (get_raw_reg_func_addr ());
2583 EMIT_ASM32 (i386_reg_c
,
2585 "lea 0x8(%esp),%esp");
2589 i386_emit_pop (void)
2591 EMIT_ASM32 (i386_pop
,
2597 i386_emit_stack_flush (void)
2599 EMIT_ASM32 (i386_stack_flush
,
2605 i386_emit_zero_ext (int arg
)
2610 EMIT_ASM32 (i386_zero_ext_8
,
2611 "and $0xff,%eax\n\t"
2615 EMIT_ASM32 (i386_zero_ext_16
,
2616 "and $0xffff,%eax\n\t"
2620 EMIT_ASM32 (i386_zero_ext_32
,
2629 i386_emit_swap (void)
2631 EMIT_ASM32 (i386_swap
,
2641 i386_emit_stack_adjust (int n
)
2643 unsigned char buf
[16];
2645 CORE_ADDR buildaddr
= current_insn_ptr
;
2648 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2652 append_insns (&buildaddr
, i
, buf
);
2653 current_insn_ptr
= buildaddr
;
2656 /* FN's prototype is `LONGEST(*fn)(int)'. */
2659 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2661 unsigned char buf
[16];
2663 CORE_ADDR buildaddr
;
2665 EMIT_ASM32 (i386_int_call_1_a
,
2666 /* Reserve a bit of stack space. */
2668 /* Put the one argument on the stack. */
2669 buildaddr
= current_insn_ptr
;
2671 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2674 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2676 append_insns (&buildaddr
, i
, buf
);
2677 current_insn_ptr
= buildaddr
;
2678 i386_emit_call (fn
);
2679 EMIT_ASM32 (i386_int_call_1_c
,
2681 "lea 0x8(%esp),%esp");
2684 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2687 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2689 unsigned char buf
[16];
2691 CORE_ADDR buildaddr
;
2693 EMIT_ASM32 (i386_void_call_2_a
,
2694 /* Preserve %eax only; we don't have to worry about %ebx. */
2696 /* Reserve a bit of stack space for arguments. */
2697 "sub $0x10,%esp\n\t"
2698 /* Copy "top" to the second argument position. (Note that
2699 we can't assume function won't scribble on its
2700 arguments, so don't try to restore from this.) */
2701 "mov %eax,4(%esp)\n\t"
2702 "mov %ebx,8(%esp)");
2703 /* Put the first argument on the stack. */
2704 buildaddr
= current_insn_ptr
;
2706 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2709 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2711 append_insns (&buildaddr
, i
, buf
);
2712 current_insn_ptr
= buildaddr
;
2713 i386_emit_call (fn
);
2714 EMIT_ASM32 (i386_void_call_2_b
,
2715 "lea 0x10(%esp),%esp\n\t"
2716 /* Restore original stack top. */
2722 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2725 /* Check low half first, more likely to be decider */
2726 "cmpl %eax,(%esp)\n\t"
2727 "jne .Leq_fallthru\n\t"
2728 "cmpl %ebx,4(%esp)\n\t"
2729 "jne .Leq_fallthru\n\t"
2730 "lea 0x8(%esp),%esp\n\t"
2733 /* jmp, but don't trust the assembler to choose the right jump */
2734 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2735 ".Leq_fallthru:\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2747 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2750 /* Check low half first, more likely to be decider */
2751 "cmpl %eax,(%esp)\n\t"
2753 "cmpl %ebx,4(%esp)\n\t"
2754 "je .Lne_fallthru\n\t"
2756 "lea 0x8(%esp),%esp\n\t"
2759 /* jmp, but don't trust the assembler to choose the right jump */
2760 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2761 ".Lne_fallthru:\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2773 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2776 "cmpl %ebx,4(%esp)\n\t"
2778 "jne .Llt_fallthru\n\t"
2779 "cmpl %eax,(%esp)\n\t"
2780 "jnl .Llt_fallthru\n\t"
2782 "lea 0x8(%esp),%esp\n\t"
2785 /* jmp, but don't trust the assembler to choose the right jump */
2786 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2787 ".Llt_fallthru:\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2799 i386_emit_le_goto (int *offset_p
, int *size_p
)
2802 "cmpl %ebx,4(%esp)\n\t"
2804 "jne .Lle_fallthru\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "jnle .Lle_fallthru\n\t"
2808 "lea 0x8(%esp),%esp\n\t"
2811 /* jmp, but don't trust the assembler to choose the right jump */
2812 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2813 ".Lle_fallthru:\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2825 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2828 "cmpl %ebx,4(%esp)\n\t"
2830 "jne .Lgt_fallthru\n\t"
2831 "cmpl %eax,(%esp)\n\t"
2832 "jng .Lgt_fallthru\n\t"
2834 "lea 0x8(%esp),%esp\n\t"
2837 /* jmp, but don't trust the assembler to choose the right jump */
2838 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2839 ".Lgt_fallthru:\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2851 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2854 "cmpl %ebx,4(%esp)\n\t"
2856 "jne .Lge_fallthru\n\t"
2857 "cmpl %eax,(%esp)\n\t"
2858 "jnge .Lge_fallthru\n\t"
2860 "lea 0x8(%esp),%esp\n\t"
2863 /* jmp, but don't trust the assembler to choose the right jump */
2864 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2865 ".Lge_fallthru:\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2876 struct emit_ops i386_emit_ops
=
2884 i386_emit_rsh_signed
,
2885 i386_emit_rsh_unsigned
,
2893 i386_emit_less_signed
,
2894 i386_emit_less_unsigned
,
2898 i386_write_goto_address
,
2903 i386_emit_stack_flush
,
2906 i386_emit_stack_adjust
,
2907 i386_emit_int_call_1
,
2908 i386_emit_void_call_2
,
2918 static struct emit_ops
*
2922 if (is_64bit_tdesc ())
2923 return &amd64_emit_ops
;
2926 return &i386_emit_ops
;
2929 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2932 x86_target::sw_breakpoint_from_kind (int kind
, int *size
)
2934 *size
= x86_breakpoint_len
;
2935 return x86_breakpoint
;
/* Range stepping is supported on x86/x86-64.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2954 x86_get_ipa_tdesc_idx (void)
2956 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2957 const struct target_desc
*tdesc
= regcache
->tdesc
;
2960 return amd64_get_ipa_tdesc_idx (tdesc
);
2963 if (tdesc
== tdesc_i386_linux_no_xml
)
2964 return X86_TDESC_SSE
;
2966 return i386_get_ipa_tdesc_idx (tdesc
);
2969 /* This is initialized assuming an amd64 target.
2970 x86_arch_setup will correct it for i386 or amd64 targets. */
2972 struct linux_target_ops the_low_target
=
2974 x86_install_fast_tracepoint_jump_pad
,
2976 x86_get_min_fast_tracepoint_insn_len
,
2977 x86_supports_range_stepping
,
2978 x86_supports_hardware_single_step
,
2979 x86_get_syscall_trapinfo
,
2980 x86_get_ipa_tdesc_idx
,
2983 /* The linux target ops object. */
2985 linux_process_target
*the_linux_target
= &the_x86_target
;
2988 initialize_low_arch (void)
2990 /* Initialize the Linux target descriptions. */
2992 tdesc_amd64_linux_no_xml
= allocate_target_description ();
2993 copy_target_description (tdesc_amd64_linux_no_xml
,
2994 amd64_linux_read_description (X86_XSTATE_SSE_MASK
,
2996 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
2999 tdesc_i386_linux_no_xml
= allocate_target_description ();
3000 copy_target_description (tdesc_i386_linux_no_xml
,
3001 i386_linux_read_description (X86_XSTATE_SSE_MASK
));
3002 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3004 initialize_regsets_info (&x86_regsets_info
);