1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
34 /* Defined in auto-generated file i386-linux.c. */
35 void init_registers_i386_linux (void);
36 /* Defined in auto-generated file amd64-linux.c. */
37 void init_registers_amd64_linux (void);
38 /* Defined in auto-generated file i386-avx-linux.c. */
39 void init_registers_i386_avx_linux (void);
40 /* Defined in auto-generated file amd64-avx-linux.c. */
41 void init_registers_amd64_avx_linux (void);
42 /* Defined in auto-generated file i386-mmx-linux.c. */
43 void init_registers_i386_mmx_linux (void);
44 /* Defined in auto-generated file x32-linux.c. */
45 void init_registers_x32_linux (void);
46 /* Defined in auto-generated file x32-avx-linux.c. */
47 void init_registers_x32_avx_linux (void);
/* Instruction templates used when installing fast-tracepoint jumps:
   a 5-byte jmp rel32 (opcode 0xe9) and a 4-byte jmp rel16
   (operand-size prefix 0x66 + 0xe9).  The zeroed displacement bytes
   are patched in when the jump is wired up.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  These fallback
   target descriptions are sent verbatim; they were truncated and
   missing their closing </target> tag.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
67 #include <sys/procfs.h>
68 #include <sys/ptrace.h>
71 #ifndef PTRACE_GETREGSET
72 #define PTRACE_GETREGSET 0x4204
75 #ifndef PTRACE_SETREGSET
76 #define PTRACE_SETREGSET 0x4205
80 #ifndef PTRACE_GET_THREAD_AREA
81 #define PTRACE_GET_THREAD_AREA 25
84 /* This definition comes from prctl.h, but some kernels may not have it. */
85 #ifndef PTRACE_ARCH_PRCTL
86 #define PTRACE_ARCH_PRCTL 30
89 /* The following definitions come from prctl.h, but may be absent
90 for certain configurations. */
92 #define ARCH_SET_GS 0x1001
93 #define ARCH_SET_FS 0x1002
94 #define ARCH_GET_FS 0x1003
95 #define ARCH_GET_GS 0x1004
98 /* Per-process arch-specific data we want to keep. */
100 struct arch_process_info
102 struct i386_debug_reg_state debug_reg_state
;
105 /* Per-thread arch-specific data we want to keep. */
109 /* Non-zero if our copy differs from what's recorded in the thread. */
110 int debug_registers_changed
;
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* GDB register number -> byte offset into the 64-bit `struct user'
   regs area.  -1 marks registers (FP/SSE) that are transferred via
   the floating-point regsets rather than the GP regset.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
148 #else /* ! __x86_64__ */
150 /* Mapping between the general-purpose registers in `struct user'
151 format and GDB's register array layout. */
152 static /*const*/ int i386_regmap
[] =
154 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
155 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
156 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
157 DS
* 4, ES
* 4, FS
* 4, GS
* 4
160 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
164 /* Called by libthread_db. */
167 ps_get_thread_area (const struct ps_prochandle
*ph
,
168 lwpid_t lwpid
, int idx
, void **base
)
171 int use_64bit
= register_size (0) == 8;
178 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
182 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
193 unsigned int desc
[4];
195 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
196 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
199 *(int *)base
= desc
[1];
204 /* Get the thread area address. This is used to recognize which
205 thread is which when tracing with the in-process agent library. We
206 don't read anything from the address, and treat it as opaque; it's
207 the address itself that we assume is unique per-thread. */
210 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
213 int use_64bit
= register_size (0) == 8;
218 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
220 *addr
= (CORE_ADDR
) (uintptr_t) base
;
229 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
230 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
231 unsigned int desc
[4];
233 const int reg_thread_area
= 3; /* bits to scale down register value. */
236 collect_register_by_name (regcache
, "gs", &gs
);
238 idx
= gs
>> reg_thread_area
;
240 if (ptrace (PTRACE_GET_THREAD_AREA
,
242 (void *) (long) idx
, (unsigned long) &desc
) < 0)
253 i386_cannot_store_register (int regno
)
255 return regno
>= I386_NUM_REGS
;
259 i386_cannot_fetch_register (int regno
)
261 return regno
>= I386_NUM_REGS
;
265 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
270 if (register_size (0) == 8)
272 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
273 if (x86_64_regmap
[i
] != -1)
274 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
279 for (i
= 0; i
< I386_NUM_REGS
; i
++)
280 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
282 collect_register_by_name (regcache
, "orig_eax",
283 ((char *) buf
) + ORIG_EAX
* 4);
287 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
292 if (register_size (0) == 8)
294 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
295 if (x86_64_regmap
[i
] != -1)
296 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
301 for (i
= 0; i
< I386_NUM_REGS
; i
++)
302 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
304 supply_register_by_name (regcache
, "orig_eax",
305 ((char *) buf
) + ORIG_EAX
* 4);
/* Convert the regcache's FP state into ptrace FP regset layout in
   BUF (fxsave layout on 64-bit, classic fsave layout on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Supply the regcache from a ptrace FP regset buffer BUF; inverse of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Convert the regcache's FP/SSE state into fxsave layout in BUF
   (PTRACE_SETFPXREGS, 32-bit only).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Supply the regcache from an fxsave-layout buffer BUF
   (PTRACE_GETFPXREGS, 32-bit only).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Convert the regcache's extended state into xsave layout in BUF
   (PTRACE_SETREGSET with NT_X86_XSTATE).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Supply the regcache from an xsave-layout buffer BUF
   (PTRACE_GETREGSET with NT_X86_XSTATE).  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
356 /* ??? The non-biarch i386 case stores all the i387 regs twice.
357 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
358 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
359 doesn't work. IWBN to avoid the duplication in the case where it
360 does work. Maybe the arch_setup routine could check whether it works
361 and update target_regsets accordingly, maybe by moving target_regsets
362 to linux_target_ops and set the right one there, rather than having to
363 modify the target_regsets global. */
365 struct regset_info target_regsets
[] =
367 #ifdef HAVE_PTRACE_GETREGS
368 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
370 x86_fill_gregset
, x86_store_gregset
},
371 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
372 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
374 # ifdef HAVE_PTRACE_GETFPXREGS
375 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
377 x86_fill_fpxregset
, x86_store_fpxregset
},
380 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
382 x86_fill_fpregset
, x86_store_fpregset
},
383 #endif /* HAVE_PTRACE_GETREGS */
384 { 0, 0, 0, -1, -1, NULL
, NULL
}
388 x86_get_pc (struct regcache
*regcache
)
390 int use_64bit
= register_size (0) == 8;
395 collect_register_by_name (regcache
, "rip", &pc
);
396 return (CORE_ADDR
) pc
;
401 collect_register_by_name (regcache
, "eip", &pc
);
402 return (CORE_ADDR
) pc
;
407 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
409 int use_64bit
= register_size (0) == 8;
413 unsigned long newpc
= pc
;
414 supply_register_by_name (regcache
, "rip", &newpc
);
418 unsigned int newpc
= pc
;
419 supply_register_by_name (regcache
, "eip", &newpc
);
/* The x86 software breakpoint: the single-byte int3 instruction.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
427 x86_breakpoint_at (CORE_ADDR pc
)
431 (*the_target
->read_memory
) (pc
, &c
, 1);
438 /* Support for debug registers. */
441 x86_linux_dr_get (ptid_t ptid
, int regnum
)
446 tid
= ptid_get_lwp (ptid
);
449 value
= ptrace (PTRACE_PEEKUSER
, tid
,
450 offsetof (struct user
, u_debugreg
[regnum
]), 0);
452 error ("Couldn't read debug register");
458 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
462 tid
= ptid_get_lwp (ptid
);
465 ptrace (PTRACE_POKEUSER
, tid
,
466 offsetof (struct user
, u_debugreg
[regnum
]), value
);
468 error ("Couldn't write debug register");
472 update_debug_registers_callback (struct inferior_list_entry
*entry
,
475 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
476 int pid
= *(int *) pid_p
;
478 /* Only update the threads of this process. */
479 if (pid_of (lwp
) == pid
)
481 /* The actual update is done later just before resuming the lwp,
482 we just mark that the registers need updating. */
483 lwp
->arch_private
->debug_registers_changed
= 1;
485 /* If the lwp isn't stopped, force it to momentarily pause, so
486 we can update its debug registers. */
488 linux_stop_lwp (lwp
);
494 /* Update the inferior's debug register REGNUM from STATE. */
497 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
499 /* Only update the threads of this process. */
500 int pid
= pid_of (get_thread_lwp (current_inferior
));
502 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
503 fatal ("Invalid debug register %d", regnum
);
505 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
508 /* Return the inferior's debug register REGNUM. */
511 i386_dr_low_get_addr (int regnum
)
513 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
514 ptid_t ptid
= ptid_of (lwp
);
516 /* DR6 and DR7 are retrieved with some other way. */
517 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
519 return x86_linux_dr_get (ptid
, regnum
);
522 /* Update the inferior's DR7 debug control register from STATE. */
525 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
527 /* Only update the threads of this process. */
528 int pid
= pid_of (get_thread_lwp (current_inferior
));
530 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
533 /* Return the inferior's DR7 debug control register. */
536 i386_dr_low_get_control (void)
538 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
539 ptid_t ptid
= ptid_of (lwp
);
541 return x86_linux_dr_get (ptid
, DR_CONTROL
);
544 /* Get the value of the DR6 debug status register from the inferior
545 and record it in STATE. */
548 i386_dr_low_get_status (void)
550 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
551 ptid_t ptid
= ptid_of (lwp
);
553 return x86_linux_dr_get (ptid
, DR_STATUS
);
556 /* Breakpoint/Watchpoint support. */
559 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
561 struct process_info
*proc
= current_process ();
568 ret
= prepare_to_access_memory ();
571 ret
= set_gdb_breakpoint_at (addr
);
572 done_accessing_memory ();
578 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
587 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
589 struct process_info
*proc
= current_process ();
596 ret
= prepare_to_access_memory ();
599 ret
= delete_gdb_breakpoint_at (addr
);
600 done_accessing_memory ();
606 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
615 x86_stopped_by_watchpoint (void)
617 struct process_info
*proc
= current_process ();
618 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
622 x86_stopped_data_address (void)
624 struct process_info
*proc
= current_process ();
626 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
632 /* Called when a new process is created. */
634 static struct arch_process_info
*
635 x86_linux_new_process (void)
637 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
639 i386_low_init_dregs (&info
->debug_reg_state
);
644 /* Called when a new thread is detected. */
646 static struct arch_lwp_info
*
647 x86_linux_new_thread (void)
649 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
651 info
->debug_registers_changed
= 1;
656 /* Called when resuming a thread.
657 If the debug regs have changed, update the thread's copies. */
660 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
662 ptid_t ptid
= ptid_of (lwp
);
663 int clear_status
= 0;
665 if (lwp
->arch_private
->debug_registers_changed
)
668 int pid
= ptid_get_pid (ptid
);
669 struct process_info
*proc
= find_process_pid (pid
);
670 struct i386_debug_reg_state
*state
671 = &proc
->private->arch_private
->debug_reg_state
;
673 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
674 if (state
->dr_ref_count
[i
] > 0)
676 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
678 /* If we're setting a watchpoint, any change the inferior
679 had done itself to the debug registers needs to be
680 discarded, otherwise, i386_low_stopped_data_address can
685 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
687 lwp
->arch_private
->debug_registers_changed
= 0;
690 if (clear_status
|| lwp
->stopped_by_watchpoint
)
691 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
694 /* When GDBSERVER is built as a 64-bit application on linux, the
695 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
696 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
697 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
698 conversion in-place ourselves. */
700 /* These types below (compat_*) define a siginfo type that is layout
701 compatible with the siginfo type exported by the 32-bit userspace
706 typedef int compat_int_t
;
707 typedef unsigned int compat_uptr_t
;
709 typedef int compat_time_t
;
710 typedef int compat_timer_t
;
711 typedef int compat_clock_t
;
713 struct compat_timeval
715 compat_time_t tv_sec
;
719 typedef union compat_sigval
721 compat_int_t sival_int
;
722 compat_uptr_t sival_ptr
;
725 typedef struct compat_siginfo
733 int _pad
[((128 / sizeof (int)) - 3)];
742 /* POSIX.1b timers */
747 compat_sigval_t _sigval
;
750 /* POSIX.1b signals */
755 compat_sigval_t _sigval
;
764 compat_clock_t _utime
;
765 compat_clock_t _stime
;
768 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
783 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
784 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
786 typedef struct compat_x32_siginfo
794 int _pad
[((128 / sizeof (int)) - 3)];
803 /* POSIX.1b timers */
808 compat_sigval_t _sigval
;
811 /* POSIX.1b signals */
816 compat_sigval_t _sigval
;
825 compat_x32_clock_t _utime
;
826 compat_x32_clock_t _stime
;
829 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
842 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
844 #define cpt_si_pid _sifields._kill._pid
845 #define cpt_si_uid _sifields._kill._uid
846 #define cpt_si_timerid _sifields._timer._tid
847 #define cpt_si_overrun _sifields._timer._overrun
848 #define cpt_si_status _sifields._sigchld._status
849 #define cpt_si_utime _sifields._sigchld._utime
850 #define cpt_si_stime _sifields._sigchld._stime
851 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
852 #define cpt_si_addr _sifields._sigfault._addr
853 #define cpt_si_band _sifields._sigpoll._band
854 #define cpt_si_fd _sifields._sigpoll._fd
856 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
857 In their place is si_timer1,si_timer2. */
859 #define si_timerid si_timer1
862 #define si_overrun si_timer2
866 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
868 memset (to
, 0, sizeof (*to
));
870 to
->si_signo
= from
->si_signo
;
871 to
->si_errno
= from
->si_errno
;
872 to
->si_code
= from
->si_code
;
874 if (to
->si_code
== SI_TIMER
)
876 to
->cpt_si_timerid
= from
->si_timerid
;
877 to
->cpt_si_overrun
= from
->si_overrun
;
878 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
880 else if (to
->si_code
== SI_USER
)
882 to
->cpt_si_pid
= from
->si_pid
;
883 to
->cpt_si_uid
= from
->si_uid
;
885 else if (to
->si_code
< 0)
887 to
->cpt_si_pid
= from
->si_pid
;
888 to
->cpt_si_uid
= from
->si_uid
;
889 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
893 switch (to
->si_signo
)
896 to
->cpt_si_pid
= from
->si_pid
;
897 to
->cpt_si_uid
= from
->si_uid
;
898 to
->cpt_si_status
= from
->si_status
;
899 to
->cpt_si_utime
= from
->si_utime
;
900 to
->cpt_si_stime
= from
->si_stime
;
906 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
909 to
->cpt_si_band
= from
->si_band
;
910 to
->cpt_si_fd
= from
->si_fd
;
913 to
->cpt_si_pid
= from
->si_pid
;
914 to
->cpt_si_uid
= from
->si_uid
;
915 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
922 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
924 memset (to
, 0, sizeof (*to
));
926 to
->si_signo
= from
->si_signo
;
927 to
->si_errno
= from
->si_errno
;
928 to
->si_code
= from
->si_code
;
930 if (to
->si_code
== SI_TIMER
)
932 to
->si_timerid
= from
->cpt_si_timerid
;
933 to
->si_overrun
= from
->cpt_si_overrun
;
934 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
936 else if (to
->si_code
== SI_USER
)
938 to
->si_pid
= from
->cpt_si_pid
;
939 to
->si_uid
= from
->cpt_si_uid
;
941 else if (to
->si_code
< 0)
943 to
->si_pid
= from
->cpt_si_pid
;
944 to
->si_uid
= from
->cpt_si_uid
;
945 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
949 switch (to
->si_signo
)
952 to
->si_pid
= from
->cpt_si_pid
;
953 to
->si_uid
= from
->cpt_si_uid
;
954 to
->si_status
= from
->cpt_si_status
;
955 to
->si_utime
= from
->cpt_si_utime
;
956 to
->si_stime
= from
->cpt_si_stime
;
962 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
965 to
->si_band
= from
->cpt_si_band
;
966 to
->si_fd
= from
->cpt_si_fd
;
969 to
->si_pid
= from
->cpt_si_pid
;
970 to
->si_uid
= from
->cpt_si_uid
;
971 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
978 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
981 memset (to
, 0, sizeof (*to
));
983 to
->si_signo
= from
->si_signo
;
984 to
->si_errno
= from
->si_errno
;
985 to
->si_code
= from
->si_code
;
987 if (to
->si_code
== SI_TIMER
)
989 to
->cpt_si_timerid
= from
->si_timerid
;
990 to
->cpt_si_overrun
= from
->si_overrun
;
991 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
993 else if (to
->si_code
== SI_USER
)
995 to
->cpt_si_pid
= from
->si_pid
;
996 to
->cpt_si_uid
= from
->si_uid
;
998 else if (to
->si_code
< 0)
1000 to
->cpt_si_pid
= from
->si_pid
;
1001 to
->cpt_si_uid
= from
->si_uid
;
1002 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1006 switch (to
->si_signo
)
1009 to
->cpt_si_pid
= from
->si_pid
;
1010 to
->cpt_si_uid
= from
->si_uid
;
1011 to
->cpt_si_status
= from
->si_status
;
1012 to
->cpt_si_utime
= from
->si_utime
;
1013 to
->cpt_si_stime
= from
->si_stime
;
1019 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1022 to
->cpt_si_band
= from
->si_band
;
1023 to
->cpt_si_fd
= from
->si_fd
;
1026 to
->cpt_si_pid
= from
->si_pid
;
1027 to
->cpt_si_uid
= from
->si_uid
;
1028 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1035 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1036 compat_x32_siginfo_t
*from
)
1038 memset (to
, 0, sizeof (*to
));
1040 to
->si_signo
= from
->si_signo
;
1041 to
->si_errno
= from
->si_errno
;
1042 to
->si_code
= from
->si_code
;
1044 if (to
->si_code
== SI_TIMER
)
1046 to
->si_timerid
= from
->cpt_si_timerid
;
1047 to
->si_overrun
= from
->cpt_si_overrun
;
1048 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1050 else if (to
->si_code
== SI_USER
)
1052 to
->si_pid
= from
->cpt_si_pid
;
1053 to
->si_uid
= from
->cpt_si_uid
;
1055 else if (to
->si_code
< 0)
1057 to
->si_pid
= from
->cpt_si_pid
;
1058 to
->si_uid
= from
->cpt_si_uid
;
1059 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1063 switch (to
->si_signo
)
1066 to
->si_pid
= from
->cpt_si_pid
;
1067 to
->si_uid
= from
->cpt_si_uid
;
1068 to
->si_status
= from
->cpt_si_status
;
1069 to
->si_utime
= from
->cpt_si_utime
;
1070 to
->si_stime
= from
->cpt_si_stime
;
1076 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1079 to
->si_band
= from
->cpt_si_band
;
1080 to
->si_fd
= from
->cpt_si_fd
;
1083 to
->si_pid
= from
->cpt_si_pid
;
1084 to
->si_uid
= from
->cpt_si_uid
;
1085 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1091 /* Is this process 64-bit? */
1092 static int linux_is_elf64
;
1093 #endif /* __x86_64__ */
1095 /* Convert a native/host siginfo object, into/from the siginfo in the
1096 layout of the inferiors' architecture. Returns true if any
1097 conversion was done; false otherwise. If DIRECTION is 1, then copy
1098 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1102 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1105 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1106 if (register_size (0) == 4)
1108 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1109 fatal ("unexpected difference in siginfo");
1112 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1114 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1118 /* No fixup for native x32 GDB. */
1119 else if (!linux_is_elf64
&& sizeof (void *) == 8)
1121 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1122 fatal ("unexpected difference in siginfo");
1125 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1128 siginfo_from_compat_x32_siginfo (native
,
1129 (struct compat_x32_siginfo
*) inf
);
1140 /* Update gdbserver_xmltarget. */
1143 x86_linux_update_xmltarget (void)
1146 struct regset_info
*regset
;
1147 static unsigned long long xcr0
;
1148 static int have_ptrace_getregset
= -1;
1149 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
1150 static int have_ptrace_getfpxregs
= -1;
1153 if (!current_inferior
)
1156 /* Before changing the register cache internal layout or the target
1157 regsets, flush the contents of the current valid caches back to
1159 regcache_invalidate ();
1161 pid
= pid_of (get_thread_lwp (current_inferior
));
1163 if (num_xmm_registers
== 8)
1164 init_registers_i386_linux ();
1165 else if (linux_is_elf64
)
1166 init_registers_amd64_linux ();
1168 init_registers_x32_linux ();
1171 # ifdef HAVE_PTRACE_GETFPXREGS
1172 if (have_ptrace_getfpxregs
== -1)
1174 elf_fpxregset_t fpxregs
;
1176 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
1178 have_ptrace_getfpxregs
= 0;
1179 x86_xcr0
= I386_XSTATE_X87_MASK
;
1181 /* Disable PTRACE_GETFPXREGS. */
1182 for (regset
= target_regsets
;
1183 regset
->fill_function
!= NULL
; regset
++)
1184 if (regset
->get_request
== PTRACE_GETFPXREGS
)
1191 have_ptrace_getfpxregs
= 1;
1194 if (!have_ptrace_getfpxregs
)
1196 init_registers_i386_mmx_linux ();
1200 init_registers_i386_linux ();
1206 /* Don't use XML. */
1208 if (num_xmm_registers
== 8)
1209 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1211 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1213 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1216 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1221 /* Check if XSAVE extended state is supported. */
1222 if (have_ptrace_getregset
== -1)
1224 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1227 iov
.iov_base
= xstateregs
;
1228 iov
.iov_len
= sizeof (xstateregs
);
1230 /* Check if PTRACE_GETREGSET works. */
1231 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1234 have_ptrace_getregset
= 0;
1238 have_ptrace_getregset
= 1;
1240 /* Get XCR0 from XSAVE extended state at byte 464. */
1241 xcr0
= xstateregs
[464 / sizeof (long long)];
1243 /* Use PTRACE_GETREGSET if it is available. */
1244 for (regset
= target_regsets
;
1245 regset
->fill_function
!= NULL
; regset
++)
1246 if (regset
->get_request
== PTRACE_GETREGSET
)
1247 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1248 else if (regset
->type
!= GENERAL_REGS
)
1252 if (have_ptrace_getregset
)
1254 /* AVX is the highest feature we support. */
1255 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1260 /* I386 has 8 xmm regs. */
1261 if (num_xmm_registers
== 8)
1262 init_registers_i386_avx_linux ();
1263 else if (linux_is_elf64
)
1264 init_registers_amd64_avx_linux ();
1266 init_registers_x32_avx_linux ();
1268 init_registers_i386_avx_linux ();
1274 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1275 PTRACE_GETREGSET. */
1278 x86_linux_process_qsupported (const char *query
)
1280 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1281 with "i386" in qSupported query, it supports x86 XML target
1284 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1286 char *copy
= xstrdup (query
+ 13);
1289 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1291 if (strcmp (p
, "i386") == 0)
1301 x86_linux_update_xmltarget ();
1304 /* Initialize gdbserver for the architecture of the inferior. */
1307 x86_arch_setup (void)
1309 int pid
= pid_of (get_thread_lwp (current_inferior
));
1310 unsigned int machine
;
1311 int is_elf64
= linux_pid_exe_is_elf_64_file (pid
, &machine
);
1313 if (sizeof (void *) == 4)
1316 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1318 else if (machine
== EM_X86_64
)
1319 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1326 /* This can only happen if /proc/<pid>/exe is unreadable,
1327 but "that can't happen" if we've gotten this far.
1328 Fall through and assume this is a 32-bit program. */
1330 else if (machine
== EM_X86_64
)
1332 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1333 the_low_target
.num_regs
= -1;
1334 the_low_target
.regmap
= NULL
;
1335 the_low_target
.cannot_fetch_register
= NULL
;
1336 the_low_target
.cannot_store_register
= NULL
;
1338 /* Amd64 has 16 xmm regs. */
1339 num_xmm_registers
= 16;
1341 linux_is_elf64
= is_elf64
;
1342 x86_linux_update_xmltarget ();
1349 /* Ok we have a 32-bit inferior. */
1351 the_low_target
.num_regs
= I386_NUM_REGS
;
1352 the_low_target
.regmap
= i386_regmap
;
1353 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1354 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1356 /* I386 has 8 xmm regs. */
1357 num_xmm_registers
= 8;
1359 x86_linux_update_xmltarget ();
/* x86 gdbserver always supports tracepoints.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1369 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1371 write_inferior_memory (*to
, buf
, len
);
/* Decode the string OP of whitespace-separated hex byte values
   (e.g. "48 89 e6") into BUF.  Returns the number of bytes written.
   The caller must ensure BUF is large enough.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: no more hex bytes.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1397 /* Build a jump pad that saves registers and calls a collection
1398 function. Writes a jump instruction to the jump pad to
1399 JJUMPAD_INSN. The caller is responsible to write it in at the
1400 tracepoint address. */
1403 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1404 CORE_ADDR collector
,
1407 CORE_ADDR
*jump_entry
,
1408 CORE_ADDR
*trampoline
,
1409 ULONGEST
*trampoline_size
,
1410 unsigned char *jjump_pad_insn
,
1411 ULONGEST
*jjump_pad_insn_size
,
1412 CORE_ADDR
*adjusted_insn_addr
,
1413 CORE_ADDR
*adjusted_insn_addr_end
,
1416 unsigned char buf
[40];
1420 CORE_ADDR buildaddr
= *jump_entry
;
1422 /* Build the jump pad. */
1424 /* First, do tracepoint data collection. Save registers. */
1426 /* Need to ensure stack pointer saved first. */
1427 buf
[i
++] = 0x54; /* push %rsp */
1428 buf
[i
++] = 0x55; /* push %rbp */
1429 buf
[i
++] = 0x57; /* push %rdi */
1430 buf
[i
++] = 0x56; /* push %rsi */
1431 buf
[i
++] = 0x52; /* push %rdx */
1432 buf
[i
++] = 0x51; /* push %rcx */
1433 buf
[i
++] = 0x53; /* push %rbx */
1434 buf
[i
++] = 0x50; /* push %rax */
1435 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1436 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1437 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1438 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1439 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1440 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1441 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1442 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1443 buf
[i
++] = 0x9c; /* pushfq */
1444 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1446 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1447 i
+= sizeof (unsigned long);
1448 buf
[i
++] = 0x57; /* push %rdi */
1449 append_insns (&buildaddr
, i
, buf
);
1451 /* Stack space for the collecting_t object. */
1453 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1454 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1455 memcpy (buf
+ i
, &tpoint
, 8);
1457 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1458 i
+= push_opcode (&buf
[i
],
1459 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1460 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1461 append_insns (&buildaddr
, i
, buf
);
1465 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1466 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1468 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1469 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1470 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1471 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1472 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1473 append_insns (&buildaddr
, i
, buf
);
1475 /* Set up the gdb_collect call. */
1476 /* At this point, (stack pointer + 0x18) is the base of our saved
1480 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1481 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1483 /* tpoint address may be 64-bit wide. */
1484 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1485 memcpy (buf
+ i
, &tpoint
, 8);
1487 append_insns (&buildaddr
, i
, buf
);
1489 /* The collector function being in the shared library, may be
1490 >31-bits away off the jump pad. */
1492 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1493 memcpy (buf
+ i
, &collector
, 8);
1495 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1496 append_insns (&buildaddr
, i
, buf
);
1498 /* Clear the spin-lock. */
1500 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1501 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1502 memcpy (buf
+ i
, &lockaddr
, 8);
1504 append_insns (&buildaddr
, i
, buf
);
1506 /* Remove stack that had been used for the collect_t object. */
1508 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1509 append_insns (&buildaddr
, i
, buf
);
1511 /* Restore register state. */
1513 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1517 buf
[i
++] = 0x9d; /* popfq */
1518 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1519 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1520 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1521 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1522 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1523 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1524 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1525 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1526 buf
[i
++] = 0x58; /* pop %rax */
1527 buf
[i
++] = 0x5b; /* pop %rbx */
1528 buf
[i
++] = 0x59; /* pop %rcx */
1529 buf
[i
++] = 0x5a; /* pop %rdx */
1530 buf
[i
++] = 0x5e; /* pop %rsi */
1531 buf
[i
++] = 0x5f; /* pop %rdi */
1532 buf
[i
++] = 0x5d; /* pop %rbp */
1533 buf
[i
++] = 0x5c; /* pop %rsp */
1534 append_insns (&buildaddr
, i
, buf
);
1536 /* Now, adjust the original instruction to execute in the jump
1538 *adjusted_insn_addr
= buildaddr
;
1539 relocate_instruction (&buildaddr
, tpaddr
);
1540 *adjusted_insn_addr_end
= buildaddr
;
1542 /* Finally, write a jump back to the program. */
1544 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1545 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1548 "E.Jump back from jump pad too far from tracepoint "
1549 "(offset 0x%" PRIx64
" > int32).", loffset
);
1553 offset
= (int) loffset
;
1554 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1555 memcpy (buf
+ 1, &offset
, 4);
1556 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1558 /* The jump pad is now built. Wire in a jump to our jump pad. This
1559 is always done last (by our caller actually), so that we can
1560 install fast tracepoints with threads running. This relies on
1561 the agent's atomic write support. */
1562 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1563 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1566 "E.Jump pad too far from tracepoint "
1567 "(offset 0x%" PRIx64
" > int32).", loffset
);
1571 offset
= (int) loffset
;
1573 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1574 memcpy (buf
+ 1, &offset
, 4);
1575 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1576 *jjump_pad_insn_size
= sizeof (jump_insn
);
1578 /* Return the end address of our pad. */
1579 *jump_entry
= buildaddr
;
1584 #endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   NOTE(review): this excerpt is a damaged extraction with elided
   lines; structural tokens (declarations, if/else skeletons, error
   path) restored from surrounding context -- verify against the
   complete source.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;  /* NOTE(review): declarations elided in excerpt -- confirm */
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;  /* NOTE(review): restored -- confirm */
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  /* Record this thread's identity in the second slot (thread-area base
     read from %gs:0x0).  */
  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8;	/* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83;	/* add $0x8,%esp */
  buf[1] = 0xc4;	/* NOTE(review): restored -- confirm */
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken CPUs; a plain store is an atomic release here.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the save sequence above.  */
  i = 0;
  buf[i++] = 0x83;  /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;  /* NOTE(review): restored -- confirm */
  buf[i++] = 0x04;
  buf[i++] = 0x17;  /* pop %ss */
  buf[i++] = 0x0f;  /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f;  /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07;  /* pop %es */
  buf[i++] = 0x1f;  /* pop %ds */
  buf[i++] = 0x9d;  /* popf */
  buf[i++] = 0x83;  /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61;  /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)  /* NOTE(review): guard restored from context */
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,	/* NOTE(review): error path restored -- confirm */
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Dispatch jump-pad installation to the architecture-specific builder,
   choosing by the inferior's register size (8 bytes => 64-bit inferior).

   NOTE(review): excerpt had elided lines; the lockaddr/orig_size/err
   parameters and the argument lists are restored from the visible call
   fragments -- verify against the complete source.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  Returns 0 when the length cannot be determined yet.

   NOTE(review): excerpt had elided lines; control-flow skeleton and
   return values restored from context -- verify against full source.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';  /* NOTE(review): restored -- confirm */

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
/* Append LEN bytes of already-assembled instructions at START to the
   current compiled-bytecode emission point, advancing current_insn_ptr.
   NOTE(review): the debug guard around the fprintf was elided in this
   excerpt and is restored -- confirm.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.

   NOTE(review): excerpt had elided lines; the do/while wrappers, INSNS
   splice and the #ifdef skeleton are restored from context -- verify
   against the complete source.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant: assemble the block in .code32 mode so the same
   source builds pads for i386 inferiors on a 64-bit gdbserver.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
/* amd64 bytecode-compiler emitters: arithmetic/logic group.
   NOTE(review): excerpt had elided lines; `static void` headers, braces
   and some asm strings restored from context -- verify against the
   complete source.  */

/* Emit the agent-expression function prologue: set up a frame and
   save the two incoming arguments (raw regs base, value pointer).  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store top-of-stack (%rax) through the saved value
   pointer and return.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");	/* NOTE(review): restored -- confirm */
}

/* NOTE(review): bodies of mul/lsh/rsh were entirely elided in this
   excerpt; upstream flags them unimplemented -- confirm.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend top-of-stack from ARG bits.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"	/* NOTE(review): asm restored -- confirm */
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Logical NOT of top-of-stack: %rax := (%rax == 0).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"	/* NOTE(review): restored -- confirm */
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Push 1 if next-on-stack == top, else 0; pops one slot.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"	/* NOTE(review): restored -- confirm */
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"	/* NOTE(review): restored -- confirm */
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"	/* NOTE(review): restored -- confirm */
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"	/* NOTE(review): restored -- confirm */
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"	/* NOTE(review): restored -- confirm */
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"	/* NOTE(review): restored -- confirm */
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* amd64 emitters: memory reference, branches, constants, calls.
   NOTE(review): excerpt had elided lines; skeletons restored from
   context -- verify against the complete source.  */

/* Dereference top-of-stack as a SIZE-byte memory reference.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");	/* NOTE(review): restored */
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");	/* NOTE(review): restored */
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Emit a conditional jump taken when top-of-stack is non-zero; report
   where the 32-bit jump offset lives so it can be patched later.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"	/* NOTE(review): restored -- confirm */
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    /* jne, hand-encoded so the offset field position is fixed.  */
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;	/* NOTE(review): restored from upstream -- verify */
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;	/* NOTE(review): restored -- verify */
  if (size_p)
    *size_p = 4;
}

/* Patch a previously emitted goto at FROM to target TO; SIZE is the
   width of the offset field (only 4 supported).  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)	/* NOTE(review): guard restored -- confirm */
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

/* Emit "mov $NUM,%rax" to load a 64-bit constant.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;	/* NOTE(review): restored -- confirm */
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN, using an indirect call through %r10 when FN is
   out of rel32 range.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declarations restored -- confirm */
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;	/* NOTE(review): restored -- confirm */
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;	/* NOTE(review): restored -- confirm */
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8;	/* call <reladdr>; NOTE(review): restored */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* amd64 emitters: register fetch, stack ops, helper calls.
   NOTE(review): excerpt had elided lines; skeletons restored from
   context -- verify against the complete source.  */

/* Emit code to fetch raw register REG: load the register number into
   %esi and call the IPA's raw-register accessor.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declarations restored -- confirm */
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;	/* NOTE(review): restored -- confirm */
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");	/* NOTE(review): restored -- confirm */
}

/* Push the top-of-stack cache (%rax) onto the real stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");	/* NOTE(review): restored -- confirm */
}

/* Zero-extend top-of-stack from ARG bits.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");	/* NOTE(review): restored */
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");	/* NOTE(review): restored */
      break;
    default:
      emit_error = 1;
    }
}

/* Swap the two topmost stack entries.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"	/* NOTE(review): body restored -- confirm */
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit "lea N*8(%rsp),%rsp" to drop N stack entries.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;	/* NOTE(review): restored -- confirm */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;	/* NOTE(review): restored -- confirm */
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"	/* NOTE(review): restored -- confirm */
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");	/* NOTE(review): restored -- confirm */
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");	/* NOTE(review): restored -- confirm */
}
/* amd64 fused compare-and-branch emitters.  Each compares next-on-stack
   with top-of-stack, pops both, and emits a hand-encoded jmp whose
   32-bit offset field (at *OFFSET_P, width *SIZE_P) is patched later.
   NOTE(review): excerpt had elided lines; headers and offset/size
   assignments restored from context -- verify against full source.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");	/* NOTE(review): restored -- confirm */

  if (offset_p)
    *offset_p = 13;	/* NOTE(review): restored -- verify */
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
/* Vtable of amd64 bytecode emitters, in struct emit_ops field order.
   NOTE(review): most entries were elided in this excerpt; list restored
   by analogy with the function definitions above -- verify field order
   against the struct emit_ops declaration.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
/* i386 bytecode-compiler emitters: arithmetic/logic group.  64-bit
   values live split across %eax (low) / %ebx (high) and two stack
   slots.  NOTE(review): excerpt had elided lines; headers and some asm
   strings restored from context -- verify against full source.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	    "push %ebp\n\t"	/* NOTE(review): body restored -- confirm */
	    "mov %esp,%ebp\n\t"
	    "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	    "mov 12(%ebp),%ecx\n\t"
	    "mov %eax,(%ecx)\n\t"
	    "mov %ebx,0x4(%ecx)\n\t"
	    "xor %eax,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    "pop %ebx\n\t"
	    "pop %ebp\n\t"
	    "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	    "add (%esp),%eax\n\t"
	    "adc 0x4(%esp),%ebx\n\t"	/* 64-bit add via carry */
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	    "subl %eax,(%esp)\n\t"
	    "sbbl %ebx,4(%esp)\n\t"
	    "pop %eax\n\t"	/* NOTE(review): restored -- confirm */
	    "pop %ebx\n\t");
}

/* NOTE(review): bodies of mul/lsh/rsh were entirely elided in this
   excerpt; upstream flags them unimplemented -- confirm.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend top-of-stack from ARG bits into the %ebx:%eax pair.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		"cbtw\n\t"	/* NOTE(review): restored -- confirm */
		"cwtl\n\t"
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		"cwtl\n\t"	/* NOTE(review): restored -- confirm */
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		"movl %eax,%ebx\n\t"
		"sarl $31,%ebx");	/* NOTE(review): restored */
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	    "or %ebx,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    "test %eax,%eax\n\t"
	    "sete %cl\n\t"
	    "xor %ebx,%ebx\n\t"
	    "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	    "and (%esp),%eax\n\t"
	    "and 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	    "or (%esp),%eax\n\t"
	    "or 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	    "xor (%esp),%eax\n\t"
	    "xor 0x4(%esp),%ebx\n\t"
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	    "xor $0xffffffff,%eax\n\t"
	    "xor $0xffffffff,%ebx\n\t");
}

/* Push 1 if the two 64-bit (pair) operands are equal, else 0.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jne .Li386_equal_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "je .Li386_equal_true\n\t"
	    ".Li386_equal_false:\n\t"
	    "xor %eax,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    "jmp .Li386_equal_end\n\t"
	    ".Li386_equal_true:\n\t"
	    "mov $1,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    ".Li386_equal_end:\n\t"
	    "xor %ebx,%ebx\n\t"	/* NOTE(review): restored -- confirm */
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    "jne .Li386_less_signed_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jl .Li386_less_signed_true\n\t"
	    ".Li386_less_signed_false:\n\t"
	    "xor %eax,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    "jmp .Li386_less_signed_end\n\t"
	    ".Li386_less_signed_true:\n\t"
	    "mov $1,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    ".Li386_less_signed_end:\n\t"
	    "xor %ebx,%ebx\n\t"	/* NOTE(review): restored -- confirm */
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	    "cmpl %ebx,4(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    "jne .Li386_less_unsigned_false\n\t"
	    "cmpl %eax,(%esp)\n\t"
	    "jb .Li386_less_unsigned_true\n\t"
	    ".Li386_less_unsigned_false:\n\t"
	    "xor %eax,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    "jmp .Li386_less_unsigned_end\n\t"
	    ".Li386_less_unsigned_true:\n\t"
	    "mov $1,%eax\n\t"	/* NOTE(review): restored -- confirm */
	    ".Li386_less_unsigned_end:\n\t"
	    "xor %ebx,%ebx\n\t"	/* NOTE(review): restored -- confirm */
	    "lea 0x8(%esp),%esp");
}
/* i386 emitters: memory reference, branches, constants, calls.
   NOTE(review): excerpt had elided lines; skeletons restored from
   context -- verify against the complete source.  */

/* Dereference top-of-stack as a SIZE-byte memory reference; an 8-byte
   load fills the %ebx:%eax pair.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		"movb (%eax),%al");	/* NOTE(review): restored */
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		"movw (%eax),%ax");	/* NOTE(review): restored */
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		"movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		"movl 4(%eax),%ebx\n\t"
		"movl (%eax),%eax");
      break;
    }
}

/* Emit a conditional jump taken when the 64-bit top-of-stack pair is
   non-zero; report the jump-offset field position for later patching.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	    "mov %eax,%ecx\n\t"	/* NOTE(review): restored -- confirm */
	    "or %ebx,%ecx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "cmpl $0,%ecx\n\t"
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	    /* Don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;	/* NOTE(review): restored -- verify */
  if (size_p)
    *size_p = 4;
}

/* Patch a previously emitted goto at FROM to target TO; SIZE must be 4.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)	/* NOTE(review): guard restored -- confirm */
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

/* Load 64-bit constant NUM into the %ebx:%eax pair, skipping the high
   move when the high half is zero.  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;	/* NOTE(review): declarations restored -- confirm */
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)	/* NOTE(review): branch skeleton restored -- confirm */
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a rel32 call to FN (always reachable on 32-bit).  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;	/* NOTE(review): declarations restored -- confirm */
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
/* i386 emitters: register fetch, stack ops, helper calls.
   NOTE(review): excerpt had elided lines; skeletons restored from
   context -- verify against the complete source.  */

/* Emit code to fetch raw register REG: push the raw-regs base and the
   register number as arguments and call the IPA accessor.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declarations restored -- confirm */
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	    "sub $0x8,%esp");	/* NOTE(review): restored -- confirm */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	    "mov %eax,4(%esp)\n\t"
	    "mov 8(%ebp),%eax\n\t"
	    "mov %eax,(%esp)");	/* NOTE(review): restored -- confirm */
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	    "xor %ebx,%ebx\n\t"	/* NOTE(review): restored -- confirm */
	    "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	    "pop %eax\n\t"	/* NOTE(review): restored -- confirm */
	    "pop %ebx");
}

/* Push the %ebx:%eax top-of-stack cache onto the real stack.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	    "push %ebx\n\t"	/* NOTE(review): restored -- confirm */
	    "push %eax");
}

/* Zero-extend the %ebx:%eax pair from ARG bits.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		"and $0xff,%eax\n\t"
		"xor %ebx,%ebx");	/* NOTE(review): restored */
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		"and $0xffff,%eax\n\t"
		"xor %ebx,%ebx");	/* NOTE(review): restored */
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		"xor %ebx,%ebx");	/* NOTE(review): restored */
      break;
    default:
      emit_error = 1;
    }
}

/* Swap the two topmost 64-bit (pair) stack entries.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	    "mov %eax,%ecx\n\t"	/* NOTE(review): body restored -- confirm */
	    "mov %ebx,%edx\n\t"
	    "pop %eax\n\t"
	    "pop %ebx\n\t"
	    "push %edx\n\t"
	    "push %ecx");
}

/* Emit "lea N*8(%esp),%esp" to drop N 64-bit stack entries.  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;	/* NOTE(review): restored -- confirm */
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	    /* Reserve a bit of stack space.  */
	    "sub $0x8,%esp");	/* NOTE(review): restored -- confirm */
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;	/* NOTE(review): restored -- confirm */
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	    "mov %edx,%ebx\n\t"	/* result high half from %edx; restored */
	    "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;	/* NOTE(review): declaration restored -- confirm */
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	    /* Preserve %eax only; we don't have to worry about %ebx.  */
	    "push %eax\n\t"	/* NOTE(review): restored -- confirm */
	    /* Reserve a bit of stack space for arguments.  */
	    "sub $0x10,%esp\n\t"
	    /* Copy "top" to the second argument position.  (Note that
	       we can't assume function won't scribble on its
	       arguments, so don't try to restore from this.)  */
	    "mov %eax,4(%esp)\n\t"
	    "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;	/* NOTE(review): restored -- confirm */
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	    "lea 0x10(%esp),%esp\n\t"
	    /* Restore original stack top.  */
	    "pop %eax");	/* NOTE(review): restored -- confirm */
}
/* i386 fused compare-and-branch emitters for 64-bit pair operands:
   compare high halves first (except eq/ne, which check low first),
   pop both operands, and emit a hand-encoded jmp whose 32-bit offset
   field (at *OFFSET_P, width *SIZE_P) is patched later.
   NOTE(review): excerpt had elided lines; the high-half branch mnemonics
   and offset values are restored from context -- verify against the
   complete source.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"	/* NOTE(review): restored -- confirm */
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;	/* NOTE(review): restored -- verify */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"	/* NOTE(review): restored */
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"	/* NOTE(review): restored */
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"	/* NOTE(review): restored -- confirm */
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"	/* NOTE(review): restored */
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* NOTE(review): restored -- verify */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Lle_jump\n\t"	/* NOTE(review): restored -- confirm */
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"	/* NOTE(review): restored */
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"	/* NOTE(review): restored -- confirm */
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"	/* NOTE(review): restored */
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lge_jump\n\t"	/* NOTE(review): restored -- confirm */
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"	/* NOTE(review): restored */
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
/* Vtable of i386 bytecode emitters, in struct emit_ops field order.
   NOTE(review): most entries were elided in this excerpt; list restored
   by analogy with the function definitions above -- verify field order
   against the struct emit_ops declaration.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
/* Return the emitter vtable appropriate for the inferior: amd64 when
   registers are 8 bytes wide, i386 otherwise.
   NOTE(review): function header and #ifdef skeleton restored from
   elided lines -- confirm.  */

static struct emit_ops *
x86_emit_ops (void)
{
  int use_64bit = register_size (0) == 8;

#ifdef __x86_64__
  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3175 /* This is initialized assuming an amd64 target.
3176 x86_arch_setup will correct it for i386 or amd64 targets. */
3178 struct linux_target_ops the_low_target
=
3186 NULL
, /* fetch_register */
3196 x86_stopped_by_watchpoint
,
3197 x86_stopped_data_address
,
3198 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3199 native i386 case (no registers smaller than an xfer unit), and are not
3200 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3203 /* need to fix up i386 siginfo if host is amd64 */
3205 x86_linux_new_process
,
3206 x86_linux_new_thread
,
3207 x86_linux_prepare_to_resume
,
3208 x86_linux_process_qsupported
,
3209 x86_supports_tracepoints
,
3210 x86_get_thread_area
,
3211 x86_install_fast_tracepoint_jump_pad
,
3213 x86_get_min_fast_tracepoint_insn_len
,