/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
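
/* jump_insn is a 5-byte "jmp rel32" (opcode 0xe9); small_jump_insn is
   a 4-byte "jmp rel16" (0x66 operand-size prefix plus 0xe9).  The zero
   bytes are placeholders for the relative displacement, patched in
   when a jump to a pad or trampoline is installed below.  */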

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
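
/* With this layout, GDB register 0 ("eax" in the i386 description) is
   transferred from byte offset RAX * 8 of the 64-bit `struct user'
   register block: on a 64-bit host even a 32-bit inferior's registers
   live in 8-byte slots, which is why the map scales by 8 here and by 4
   in the native i386 map below.  */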

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* st0 ... st7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fctrl ... fop */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm0 ... xmm7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm8 ... xmm15 */
  -1,					/* mxcsr */
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,			/* fs_base, gs_base */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
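
/* desc[] holds a struct user_desc as filled in by the kernel;
   desc[1] is its base_addr field, which is all we need here.  */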

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
#ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
#endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
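
/* Note the NT_X86_XSTATE entry above is declared with size 0: the real
   size depends on the running CPU/OS feature mask (XCR0) and is filled
   in by x86_linux_read_description once PTRACE_GETREGSET has been
   probed.  */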

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
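
/* So the OS-enabled feature mask lives at 64-bit word index
   464 / sizeof (uint64_t) == 58 of the XSAVE block, which is exactly
   how x86_linux_read_description indexes xstateregs[] below.  */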

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
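
/* -1 means "not yet determined"; x86_linux_read_description probes the
   request once with ptrace and latches the answer to 0 or 1.  */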

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}
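
/* For example, a GDB built with x86 XML support sends
   "xmlRegisters=i386", possibly listing several architecture names
   separated by commas; the strtok_r scan above matches the "i386"
   token and flips use_xml on.  */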

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
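
/* push_opcode simply parses hex bytes out of a string literal: e.g.
   push_opcode (&buf[i], "48 83 ec 18") appends the four bytes
   0x48 0x83 0xec 0x18 (sub $0x18,%rsp) to BUF and returns 4.  */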

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
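
/* In execution order the amd64 pad thus: saves GPRs, flags and a fake
   pc push; reserves the collecting_t; takes the spin lock; calls
   gdb_collect; drops the lock; pops the state back; runs the relocated
   original instruction; then jumps back to tpaddr + orig_size.  */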

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
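
/* For instance, EMIT_ASM (amd64_add, "add (%rsp),%rax\n\t...")
   assembles the instructions into the gdbserver binary itself between
   the start_amd64_add and end_amd64_add labels; at runtime add_insns
   copies those bytes into the compiled-bytecode buffer.  The leading
   jmp keeps gdbserver from ever executing the template in place.  */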

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
2035 EMIT_ASM (amd64_swap
,
2042 amd64_emit_stack_adjust (int n
)
2044 unsigned char buf
[16];
2046 CORE_ADDR buildaddr
= current_insn_ptr
;
2049 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2053 /* This only handles adjustments up to 16, but we don't expect any more. */
2055 append_insns (&buildaddr
, i
, buf
);
2056 current_insn_ptr
= buildaddr
;

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
)
2584 unsigned char buf
[16];
2586 CORE_ADDR buildaddr
;
2588 EMIT_ASM32 (i386_reg_a
,
2590 buildaddr
= current_insn_ptr
;
2592 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2593 memcpy (&buf
[i
], ®
, sizeof (reg
));
2595 append_insns (&buildaddr
, i
, buf
);
2596 current_insn_ptr
= buildaddr
;
2597 EMIT_ASM32 (i386_reg_b
,
2598 "mov %eax,4(%esp)\n\t"
2599 "mov 8(%ebp),%eax\n\t"
2601 i386_emit_call (get_raw_reg_func_addr ());
2602 EMIT_ASM32 (i386_reg_c
,
2604 "lea 0x8(%esp),%esp");

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2650 EMIT_ASM32 (i386_swap
,
2660 i386_emit_stack_adjust (int n
)
2662 unsigned char buf
[16];
2664 CORE_ADDR buildaddr
= current_insn_ptr
;
2667 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2671 append_insns (&buildaddr
, i
, buf
);
2672 current_insn_ptr
= buildaddr
;

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "supports_hardware_single_step".
 */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2975 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
2976 const struct target_desc
*tdesc
= regcache
->tdesc
;
2979 return amd64_get_ipa_tdesc_idx (tdesc
);
2982 if (tdesc
== tdesc_i386_linux_no_xml
)
2983 return X86_TDESC_SSE
;
2985 return i386_get_ipa_tdesc_idx (tdesc
);

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_emit_ops,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}