/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/ptrace.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
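
/* Illustrative sketch (not part of the original source): i386_regmap[]
   maps a GDB register number to a byte offset inside the ptrace
   `struct user' general-register block.  E.g., with 64-bit transfer
   slots, GDB regnum 0 (%eax) lives at offset RAX * 8 in the buffer
   handed to x86_fill_gregset/x86_store_gregset:

     int gdb_regno = 0;                     // %eax in GDB's i386 layout
     int offset = i386_regmap[gdb_regno];   // == RAX * 8 on amd64 hosts
     collect_register (regcache, gdb_regno, (char *) buf + offset);
*/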
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    *(int *) base = desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved with some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
552 /* Breakpoint/Watchpoint support. */
555 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
557 struct process_info
*proc
= current_process ();
564 ret
= prepare_to_access_memory ();
567 ret
= set_gdb_breakpoint_at (addr
);
568 done_accessing_memory ();
574 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
583 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
585 struct process_info
*proc
= current_process ();
592 ret
= prepare_to_access_memory ();
595 ret
= delete_gdb_breakpoint_at (addr
);
596 done_accessing_memory ();
602 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
611 x86_stopped_by_watchpoint (void)
613 struct process_info
*proc
= current_process ();
614 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
618 x86_stopped_data_address (void)
620 struct process_info
*proc
= current_process ();
622 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;
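
/* Illustrative sketch (not part of the original source): the conversion
   helpers below are needed because pointer-sized fields shrink when the
   inferior is 32-bit.  For example, assuming an LP64 host:

     siginfo_t native;          // native si_ptr is a 64-bit 'void *'
     compat_siginfo_t compat;   // compat cpt_si_ptr is a 32-bit integer
     compat.cpt_si_ptr = (intptr_t) native.si_ptr;  // narrowed to 32 bits

   compat_siginfo_from_siginfo/siginfo_from_compat_siginfo do this
   field by field, switching on si_code and si_signo.  */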
709 struct compat_timeval
711 compat_time_t tv_sec
;
715 typedef union compat_sigval
717 compat_int_t sival_int
;
718 compat_uptr_t sival_ptr
;
721 typedef struct compat_siginfo
729 int _pad
[((128 / sizeof (int)) - 3)];
738 /* POSIX.1b timers */
743 compat_sigval_t _sigval
;
746 /* POSIX.1b signals */
751 compat_sigval_t _sigval
;
760 compat_clock_t _utime
;
761 compat_clock_t _stime
;
764 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
779 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
780 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
782 typedef struct compat_x32_siginfo
790 int _pad
[((128 / sizeof (int)) - 3)];
799 /* POSIX.1b timers */
804 compat_sigval_t _sigval
;
807 /* POSIX.1b signals */
812 compat_sigval_t _sigval
;
821 compat_x32_clock_t _utime
;
822 compat_x32_clock_t _stime
;
825 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
838 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
840 #define cpt_si_pid _sifields._kill._pid
841 #define cpt_si_uid _sifields._kill._uid
842 #define cpt_si_timerid _sifields._timer._tid
843 #define cpt_si_overrun _sifields._timer._overrun
844 #define cpt_si_status _sifields._sigchld._status
845 #define cpt_si_utime _sifields._sigchld._utime
846 #define cpt_si_stime _sifields._sigchld._stime
847 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
848 #define cpt_si_addr _sifields._sigfault._addr
849 #define cpt_si_band _sifields._sigpoll._band
850 #define cpt_si_fd _sifields._sigpoll._fd
852 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
853 In their place is si_timer1,si_timer2. */
855 #define si_timerid si_timer1
858 #define si_overrun si_timer2
862 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
864 memset (to
, 0, sizeof (*to
));
866 to
->si_signo
= from
->si_signo
;
867 to
->si_errno
= from
->si_errno
;
868 to
->si_code
= from
->si_code
;
870 if (to
->si_code
== SI_TIMER
)
872 to
->cpt_si_timerid
= from
->si_timerid
;
873 to
->cpt_si_overrun
= from
->si_overrun
;
874 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
876 else if (to
->si_code
== SI_USER
)
878 to
->cpt_si_pid
= from
->si_pid
;
879 to
->cpt_si_uid
= from
->si_uid
;
881 else if (to
->si_code
< 0)
883 to
->cpt_si_pid
= from
->si_pid
;
884 to
->cpt_si_uid
= from
->si_uid
;
885 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
889 switch (to
->si_signo
)
892 to
->cpt_si_pid
= from
->si_pid
;
893 to
->cpt_si_uid
= from
->si_uid
;
894 to
->cpt_si_status
= from
->si_status
;
895 to
->cpt_si_utime
= from
->si_utime
;
896 to
->cpt_si_stime
= from
->si_stime
;
902 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
905 to
->cpt_si_band
= from
->si_band
;
906 to
->cpt_si_fd
= from
->si_fd
;
909 to
->cpt_si_pid
= from
->si_pid
;
910 to
->cpt_si_uid
= from
->si_uid
;
911 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
918 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
920 memset (to
, 0, sizeof (*to
));
922 to
->si_signo
= from
->si_signo
;
923 to
->si_errno
= from
->si_errno
;
924 to
->si_code
= from
->si_code
;
926 if (to
->si_code
== SI_TIMER
)
928 to
->si_timerid
= from
->cpt_si_timerid
;
929 to
->si_overrun
= from
->cpt_si_overrun
;
930 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
932 else if (to
->si_code
== SI_USER
)
934 to
->si_pid
= from
->cpt_si_pid
;
935 to
->si_uid
= from
->cpt_si_uid
;
937 else if (to
->si_code
< 0)
939 to
->si_pid
= from
->cpt_si_pid
;
940 to
->si_uid
= from
->cpt_si_uid
;
941 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
945 switch (to
->si_signo
)
948 to
->si_pid
= from
->cpt_si_pid
;
949 to
->si_uid
= from
->cpt_si_uid
;
950 to
->si_status
= from
->cpt_si_status
;
951 to
->si_utime
= from
->cpt_si_utime
;
952 to
->si_stime
= from
->cpt_si_stime
;
958 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
961 to
->si_band
= from
->cpt_si_band
;
962 to
->si_fd
= from
->cpt_si_fd
;
965 to
->si_pid
= from
->cpt_si_pid
;
966 to
->si_uid
= from
->cpt_si_uid
;
967 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
974 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
977 memset (to
, 0, sizeof (*to
));
979 to
->si_signo
= from
->si_signo
;
980 to
->si_errno
= from
->si_errno
;
981 to
->si_code
= from
->si_code
;
983 if (to
->si_code
== SI_TIMER
)
985 to
->cpt_si_timerid
= from
->si_timerid
;
986 to
->cpt_si_overrun
= from
->si_overrun
;
987 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
989 else if (to
->si_code
== SI_USER
)
991 to
->cpt_si_pid
= from
->si_pid
;
992 to
->cpt_si_uid
= from
->si_uid
;
994 else if (to
->si_code
< 0)
996 to
->cpt_si_pid
= from
->si_pid
;
997 to
->cpt_si_uid
= from
->si_uid
;
998 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1002 switch (to
->si_signo
)
1005 to
->cpt_si_pid
= from
->si_pid
;
1006 to
->cpt_si_uid
= from
->si_uid
;
1007 to
->cpt_si_status
= from
->si_status
;
1008 to
->cpt_si_utime
= from
->si_utime
;
1009 to
->cpt_si_stime
= from
->si_stime
;
1015 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1018 to
->cpt_si_band
= from
->si_band
;
1019 to
->cpt_si_fd
= from
->si_fd
;
1022 to
->cpt_si_pid
= from
->si_pid
;
1023 to
->cpt_si_uid
= from
->si_uid
;
1024 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1031 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1032 compat_x32_siginfo_t
*from
)
1034 memset (to
, 0, sizeof (*to
));
1036 to
->si_signo
= from
->si_signo
;
1037 to
->si_errno
= from
->si_errno
;
1038 to
->si_code
= from
->si_code
;
1040 if (to
->si_code
== SI_TIMER
)
1042 to
->si_timerid
= from
->cpt_si_timerid
;
1043 to
->si_overrun
= from
->cpt_si_overrun
;
1044 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1046 else if (to
->si_code
== SI_USER
)
1048 to
->si_pid
= from
->cpt_si_pid
;
1049 to
->si_uid
= from
->cpt_si_uid
;
1051 else if (to
->si_code
< 0)
1053 to
->si_pid
= from
->cpt_si_pid
;
1054 to
->si_uid
= from
->cpt_si_uid
;
1055 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1059 switch (to
->si_signo
)
1062 to
->si_pid
= from
->cpt_si_pid
;
1063 to
->si_uid
= from
->cpt_si_uid
;
1064 to
->si_status
= from
->cpt_si_status
;
1065 to
->si_utime
= from
->cpt_si_utime
;
1066 to
->si_stime
= from
->cpt_si_stime
;
1072 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1075 to
->si_band
= from
->cpt_si_band
;
1076 to
->si_fd
= from
->cpt_si_fd
;
1079 to
->si_pid
= from
->cpt_si_pid
;
1080 to
->si_uid
= from
->cpt_si_uid
;
1081 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
/* Is this process 64-bit?  */
static int linux_is_elf64;
#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!linux_is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
1136 /* Update gdbserver_xmltarget. */
1139 x86_linux_update_xmltarget (void)
1142 struct regset_info
*regset
;
1143 static unsigned long long xcr0
;
1144 static int have_ptrace_getregset
= -1;
1145 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
1146 static int have_ptrace_getfpxregs
= -1;
1149 if (!current_inferior
)
1152 /* Before changing the register cache internal layout or the target
1153 regsets, flush the contents of the current valid caches back to
1155 regcache_invalidate ();
1157 pid
= pid_of (get_thread_lwp (current_inferior
));
1159 if (num_xmm_registers
== 8)
1160 init_registers_i386_linux ();
1162 init_registers_amd64_linux ();
1165 # ifdef HAVE_PTRACE_GETFPXREGS
1166 if (have_ptrace_getfpxregs
== -1)
1168 elf_fpxregset_t fpxregs
;
1170 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
1172 have_ptrace_getfpxregs
= 0;
1173 x86_xcr0
= I386_XSTATE_X87_MASK
;
1175 /* Disable PTRACE_GETFPXREGS. */
1176 for (regset
= target_regsets
;
1177 regset
->fill_function
!= NULL
; regset
++)
1178 if (regset
->get_request
== PTRACE_GETFPXREGS
)
1185 have_ptrace_getfpxregs
= 1;
1188 if (!have_ptrace_getfpxregs
)
1190 init_registers_i386_mmx_linux ();
1194 init_registers_i386_linux ();
1200 /* Don't use XML. */
1202 if (num_xmm_registers
== 8)
1203 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1205 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1207 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1210 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1215 /* Check if XSAVE extended state is supported. */
1216 if (have_ptrace_getregset
== -1)
1218 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1221 iov
.iov_base
= xstateregs
;
1222 iov
.iov_len
= sizeof (xstateregs
);
1224 /* Check if PTRACE_GETREGSET works. */
1225 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1228 have_ptrace_getregset
= 0;
1232 have_ptrace_getregset
= 1;
1234 /* Get XCR0 from XSAVE extended state at byte 464. */
1235 xcr0
= xstateregs
[464 / sizeof (long long)];
1237 /* Use PTRACE_GETREGSET if it is available. */
1238 for (regset
= target_regsets
;
1239 regset
->fill_function
!= NULL
; regset
++)
1240 if (regset
->get_request
== PTRACE_GETREGSET
)
1241 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1242 else if (regset
->type
!= GENERAL_REGS
)
1246 if (have_ptrace_getregset
)
1248 /* AVX is the highest feature we support. */
1249 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1254 /* I386 has 8 xmm regs. */
1255 if (num_xmm_registers
== 8)
1256 init_registers_i386_avx_linux ();
1258 init_registers_amd64_avx_linux ();
1260 init_registers_i386_avx_linux ();
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
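
/* Illustrative sketch (not from the original source): a GDB with XML
   support might send a qSupported feature such as

     "xmlRegisters=i386;arm;mips"

   in which case the loop above finds the "i386" token, sets use_xml,
   and x86_linux_update_xmltarget then advertises the full XML target
   description instead of the xmltarget_*_no_xml fallbacks.  */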
/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
  int pid = pid_of (get_thread_lwp (current_inferior));
  unsigned int machine;
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#ifdef __x86_64__
  if (is_elf64 < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (machine == EM_X86_64)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      linux_is_elf64 = is_elf64;
      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
1355 x86_supports_tracepoints (void)
1361 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1363 write_inferior_memory (*to
, buf
, len
);
1368 push_opcode (unsigned char *buf
, char *op
)
1370 unsigned char *buf_org
= buf
;
1375 unsigned long ul
= strtoul (op
, &endptr
, 16);
1384 return buf
- buf_org
;
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
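
/* Illustrative sketch (not part of the original source): the amd64 pad
   emitted by the routine below is laid out roughly as

     push all GPRs, eflags and tpaddr      ; register save area
     acquire the collecting_t spin lock    ; lock cmpxchg loop
     call the collector (tpoint, regs)     ; via an absolute call in %rax
     release the lock, pop saved state     ; restore registers
     jmp back to tpaddr + orig_size        ; 5-byte relative jump_insn

   while the instruction originally at the tracepoint is relocated into
   the pad between *adjusted_insn_addr and *adjusted_insn_addr_end.  */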
1395 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1396 CORE_ADDR collector
,
1399 CORE_ADDR
*jump_entry
,
1400 CORE_ADDR
*trampoline
,
1401 ULONGEST
*trampoline_size
,
1402 unsigned char *jjump_pad_insn
,
1403 ULONGEST
*jjump_pad_insn_size
,
1404 CORE_ADDR
*adjusted_insn_addr
,
1405 CORE_ADDR
*adjusted_insn_addr_end
,
1408 unsigned char buf
[40];
1412 CORE_ADDR buildaddr
= *jump_entry
;
1414 /* Build the jump pad. */
1416 /* First, do tracepoint data collection. Save registers. */
1418 /* Need to ensure stack pointer saved first. */
1419 buf
[i
++] = 0x54; /* push %rsp */
1420 buf
[i
++] = 0x55; /* push %rbp */
1421 buf
[i
++] = 0x57; /* push %rdi */
1422 buf
[i
++] = 0x56; /* push %rsi */
1423 buf
[i
++] = 0x52; /* push %rdx */
1424 buf
[i
++] = 0x51; /* push %rcx */
1425 buf
[i
++] = 0x53; /* push %rbx */
1426 buf
[i
++] = 0x50; /* push %rax */
1427 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1428 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1429 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1430 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1431 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1432 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1433 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1434 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1435 buf
[i
++] = 0x9c; /* pushfq */
1436 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1438 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1439 i
+= sizeof (unsigned long);
1440 buf
[i
++] = 0x57; /* push %rdi */
1441 append_insns (&buildaddr
, i
, buf
);
1443 /* Stack space for the collecting_t object. */
1445 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1446 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1447 memcpy (buf
+ i
, &tpoint
, 8);
1449 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1450 i
+= push_opcode (&buf
[i
],
1451 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1452 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1453 append_insns (&buildaddr
, i
, buf
);
1457 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1458 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1460 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1461 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1462 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1463 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1464 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1465 append_insns (&buildaddr
, i
, buf
);
1467 /* Set up the gdb_collect call. */
1468 /* At this point, (stack pointer + 0x18) is the base of our saved
1472 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1473 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1475 /* tpoint address may be 64-bit wide. */
1476 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1477 memcpy (buf
+ i
, &tpoint
, 8);
1479 append_insns (&buildaddr
, i
, buf
);
1481 /* The collector function being in the shared library, may be
1482 >31-bits away off the jump pad. */
1484 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1485 memcpy (buf
+ i
, &collector
, 8);
1487 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1488 append_insns (&buildaddr
, i
, buf
);
1490 /* Clear the spin-lock. */
1492 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1493 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1494 memcpy (buf
+ i
, &lockaddr
, 8);
1496 append_insns (&buildaddr
, i
, buf
);
1498 /* Remove stack that had been used for the collect_t object. */
1500 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1501 append_insns (&buildaddr
, i
, buf
);
1503 /* Restore register state. */
1505 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1509 buf
[i
++] = 0x9d; /* popfq */
1510 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1511 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1512 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1513 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1514 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1515 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1516 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1517 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1518 buf
[i
++] = 0x58; /* pop %rax */
1519 buf
[i
++] = 0x5b; /* pop %rbx */
1520 buf
[i
++] = 0x59; /* pop %rcx */
1521 buf
[i
++] = 0x5a; /* pop %rdx */
1522 buf
[i
++] = 0x5e; /* pop %rsi */
1523 buf
[i
++] = 0x5f; /* pop %rdi */
1524 buf
[i
++] = 0x5d; /* pop %rbp */
1525 buf
[i
++] = 0x5c; /* pop %rsp */
1526 append_insns (&buildaddr
, i
, buf
);
1528 /* Now, adjust the original instruction to execute in the jump
1530 *adjusted_insn_addr
= buildaddr
;
1531 relocate_instruction (&buildaddr
, tpaddr
);
1532 *adjusted_insn_addr_end
= buildaddr
;
1534 /* Finally, write a jump back to the program. */
1536 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1537 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1540 "E.Jump back from jump pad too far from tracepoint "
1541 "(offset 0x%" PRIx64
" > int32).", loffset
);
1545 offset
= (int) loffset
;
1546 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1547 memcpy (buf
+ 1, &offset
, 4);
1548 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1550 /* The jump pad is now built. Wire in a jump to our jump pad. This
1551 is always done last (by our caller actually), so that we can
1552 install fast tracepoints with threads running. This relies on
1553 the agent's atomic write support. */
1554 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1555 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1558 "E.Jump pad too far from tracepoint "
1559 "(offset 0x%" PRIx64
" > int32).", loffset
);
1563 offset
= (int) loffset
;
1565 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1566 memcpy (buf
+ 1, &offset
, 4);
1567 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1568 *jjump_pad_insn_size
= sizeof (jump_insn
);
1570 /* Return the end address of our pad. */
1571 *jump_entry
= buildaddr
;
1576 #endif /* __x86_64__ */
1578 /* Build a jump pad that saves registers and calls a collection
1579 function. Writes a jump instruction to the jump pad to
1580 JJUMPAD_INSN. The caller is responsible to write it in at the
1581 tracepoint address. */
1584 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1585 CORE_ADDR collector
,
1588 CORE_ADDR
*jump_entry
,
1589 CORE_ADDR
*trampoline
,
1590 ULONGEST
*trampoline_size
,
1591 unsigned char *jjump_pad_insn
,
1592 ULONGEST
*jjump_pad_insn_size
,
1593 CORE_ADDR
*adjusted_insn_addr
,
1594 CORE_ADDR
*adjusted_insn_addr_end
,
1597 unsigned char buf
[0x100];
1599 CORE_ADDR buildaddr
= *jump_entry
;
1601 /* Build the jump pad. */
1603 /* First, do tracepoint data collection. Save registers. */
1605 buf
[i
++] = 0x60; /* pushad */
1606 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1607 *((int *)(buf
+ i
)) = (int) tpaddr
;
1609 buf
[i
++] = 0x9c; /* pushf */
1610 buf
[i
++] = 0x1e; /* push %ds */
1611 buf
[i
++] = 0x06; /* push %es */
1612 buf
[i
++] = 0x0f; /* push %fs */
1614 buf
[i
++] = 0x0f; /* push %gs */
1616 buf
[i
++] = 0x16; /* push %ss */
1617 buf
[i
++] = 0x0e; /* push %cs */
1618 append_insns (&buildaddr
, i
, buf
);
1620 /* Stack space for the collecting_t object. */
1622 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1624 /* Build the object. */
1625 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1626 memcpy (buf
+ i
, &tpoint
, 4);
1628 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1630 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1631 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1632 append_insns (&buildaddr
, i
, buf
);
1634 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1635 If we cared for it, this could be using xchg alternatively. */
1638 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1639 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1641 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1643 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1644 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1645 append_insns (&buildaddr
, i
, buf
);
1648 /* Set up arguments to the gdb_collect call. */
1650 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1651 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1652 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1653 append_insns (&buildaddr
, i
, buf
);
1656 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1657 append_insns (&buildaddr
, i
, buf
);
1660 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1661 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1663 append_insns (&buildaddr
, i
, buf
);
1665 buf
[0] = 0xe8; /* call <reladdr> */
1666 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1667 memcpy (buf
+ 1, &offset
, 4);
1668 append_insns (&buildaddr
, 5, buf
);
1669 /* Clean up after the call. */
1670 buf
[0] = 0x83; /* add $0x8,%esp */
1673 append_insns (&buildaddr
, 3, buf
);
1676 /* Clear the spin-lock. This would need the LOCK prefix on older
1679 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1680 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1681 memcpy (buf
+ i
, &lockaddr
, 4);
1683 append_insns (&buildaddr
, i
, buf
);
1686 /* Remove stack that had been used for the collect_t object. */
1688 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1689 append_insns (&buildaddr
, i
, buf
);
1692 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1695 buf
[i
++] = 0x17; /* pop %ss */
1696 buf
[i
++] = 0x0f; /* pop %gs */
1698 buf
[i
++] = 0x0f; /* pop %fs */
1700 buf
[i
++] = 0x07; /* pop %es */
1701 buf
[i
++] = 0x1f; /* pop %ds */
1702 buf
[i
++] = 0x9d; /* popf */
1703 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1706 buf
[i
++] = 0x61; /* popad */
1707 append_insns (&buildaddr
, i
, buf
);
1709 /* Now, adjust the original instruction to execute in the jump
1711 *adjusted_insn_addr
= buildaddr
;
1712 relocate_instruction (&buildaddr
, tpaddr
);
1713 *adjusted_insn_addr_end
= buildaddr
;
1715 /* Write the jump back to the program. */
1716 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1717 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1718 memcpy (buf
+ 1, &offset
, 4);
1719 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1721 /* The jump pad is now built. Wire in a jump to our jump pad. This
1722 is always done last (by our caller actually), so that we can
1723 install fast tracepoints with threads running. This relies on
1724 the agent's atomic write support. */
1727 /* Create a trampoline. */
1728 *trampoline_size
= sizeof (jump_insn
);
1729 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1731 /* No trampoline space available. */
1733 "E.Cannot allocate trampoline space needed for fast "
1734 "tracepoints on 4-byte instructions.");
1738 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1739 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1740 memcpy (buf
+ 1, &offset
, 4);
1741 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1743 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1744 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1745 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1746 memcpy (buf
+ 2, &offset
, 2);
1747 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1748 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1752 /* Else use a 32-bit relative jump instruction. */
1753 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1754 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1755 memcpy (buf
+ 1, &offset
, 4);
1756 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1757 *jjump_pad_insn_size
= sizeof (jump_insn
);
1760 /* Return the end address of our pad. */
1761 *jump_entry
= buildaddr
;
1767 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1768 CORE_ADDR collector
,
1771 CORE_ADDR
*jump_entry
,
1772 CORE_ADDR
*trampoline
,
1773 ULONGEST
*trampoline_size
,
1774 unsigned char *jjump_pad_insn
,
1775 ULONGEST
*jjump_pad_insn_size
,
1776 CORE_ADDR
*adjusted_insn_addr
,
1777 CORE_ADDR
*adjusted_insn_addr_end
,
1781 if (register_size (0) == 8)
1782 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1783 collector
, lockaddr
,
1784 orig_size
, jump_entry
,
1785 trampoline
, trampoline_size
,
1787 jjump_pad_insn_size
,
1789 adjusted_insn_addr_end
,
1793 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1794 collector
, lockaddr
,
1795 orig_size
, jump_entry
,
1796 trampoline
, trampoline_size
,
1798 jjump_pad_insn_size
,
1800 adjusted_insn_addr_end
,
1804 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1808 x86_get_min_fast_tracepoint_insn_len (void)
1810 static int warned_about_fast_tracepoints
= 0;
1813 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1814 used for fast tracepoints. */
1815 if (register_size (0) == 8)
1819 if (agent_loaded_p ())
1821 char errbuf
[IPA_BUFSIZ
];
1825 /* On x86, if trampolines are available, then 4-byte jump instructions
1826 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1827 with a 4-byte offset are used instead. */
1828 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1832 /* GDB has no channel to explain to user why a shorter fast
1833 tracepoint is not possible, but at least make GDBserver
1834 mention that something has gone awry. */
1835 if (!warned_about_fast_tracepoints
)
1837 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1838 warned_about_fast_tracepoints
= 1;
1845 /* Indicate that the minimum length is currently unknown since the IPA
1846 has not loaded yet. */
1852 add_insns (unsigned char *start
, int len
)
1854 CORE_ADDR buildaddr
= current_insn_ptr
;
1857 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1858 len
, paddress (buildaddr
));
1860 append_insns (&buildaddr
, len
, start
);
1861 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
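
/* Illustrative sketch (not part of the original source): a typical use
   of EMIT_ASM copies a named chunk of inline assembly into the jump
   pad at current_insn_ptr.  For instance, amd64_emit_add below is
   essentially

     static void
     amd64_emit_add (void)
     {
       EMIT_ASM (amd64_add,
		 "add (%rsp),%rax\n\t"
		 "lea 0x8(%rsp),%rsp");
     }

   which makes add_insns write the bytes assembled between the
   start_amd64_add and end_amd64_add labels into the inferior.  */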
1905 amd64_emit_prologue (void)
1907 EMIT_ASM (amd64_prologue
,
1909 "movq %rsp,%rbp\n\t"
1910 "sub $0x20,%rsp\n\t"
1911 "movq %rdi,-8(%rbp)\n\t"
1912 "movq %rsi,-16(%rbp)");
1917 amd64_emit_epilogue (void)
1919 EMIT_ASM (amd64_epilogue
,
1920 "movq -16(%rbp),%rdi\n\t"
1921 "movq %rax,(%rdi)\n\t"
1928 amd64_emit_add (void)
1930 EMIT_ASM (amd64_add
,
1931 "add (%rsp),%rax\n\t"
1932 "lea 0x8(%rsp),%rsp");
1936 amd64_emit_sub (void)
1938 EMIT_ASM (amd64_sub
,
1939 "sub %rax,(%rsp)\n\t"
1944 amd64_emit_mul (void)
1950 amd64_emit_lsh (void)
1956 amd64_emit_rsh_signed (void)
1962 amd64_emit_rsh_unsigned (void)
1968 amd64_emit_ext (int arg
)
1973 EMIT_ASM (amd64_ext_8
,
1979 EMIT_ASM (amd64_ext_16
,
1984 EMIT_ASM (amd64_ext_32
,
1993 amd64_emit_log_not (void)
1995 EMIT_ASM (amd64_log_not
,
1996 "test %rax,%rax\n\t"
2002 amd64_emit_bit_and (void)
2004 EMIT_ASM (amd64_and
,
2005 "and (%rsp),%rax\n\t"
2006 "lea 0x8(%rsp),%rsp");
2010 amd64_emit_bit_or (void)
2013 "or (%rsp),%rax\n\t"
2014 "lea 0x8(%rsp),%rsp");
2018 amd64_emit_bit_xor (void)
2020 EMIT_ASM (amd64_xor
,
2021 "xor (%rsp),%rax\n\t"
2022 "lea 0x8(%rsp),%rsp");
2026 amd64_emit_bit_not (void)
2028 EMIT_ASM (amd64_bit_not
,
2029 "xorq $0xffffffffffffffff,%rax");
2033 amd64_emit_equal (void)
2035 EMIT_ASM (amd64_equal
,
2036 "cmp %rax,(%rsp)\n\t"
2037 "je .Lamd64_equal_true\n\t"
2039 "jmp .Lamd64_equal_end\n\t"
2040 ".Lamd64_equal_true:\n\t"
2042 ".Lamd64_equal_end:\n\t"
2043 "lea 0x8(%rsp),%rsp");
2047 amd64_emit_less_signed (void)
2049 EMIT_ASM (amd64_less_signed
,
2050 "cmp %rax,(%rsp)\n\t"
2051 "jl .Lamd64_less_signed_true\n\t"
2053 "jmp .Lamd64_less_signed_end\n\t"
2054 ".Lamd64_less_signed_true:\n\t"
2056 ".Lamd64_less_signed_end:\n\t"
2057 "lea 0x8(%rsp),%rsp");
2061 amd64_emit_less_unsigned (void)
2063 EMIT_ASM (amd64_less_unsigned
,
2064 "cmp %rax,(%rsp)\n\t"
2065 "jb .Lamd64_less_unsigned_true\n\t"
2067 "jmp .Lamd64_less_unsigned_end\n\t"
2068 ".Lamd64_less_unsigned_true:\n\t"
2070 ".Lamd64_less_unsigned_end:\n\t"
2071 "lea 0x8(%rsp),%rsp");
2075 amd64_emit_ref (int size
)
2080 EMIT_ASM (amd64_ref1
,
2084 EMIT_ASM (amd64_ref2
,
2088 EMIT_ASM (amd64_ref4
,
2089 "movl (%rax),%eax");
2092 EMIT_ASM (amd64_ref8
,
2093 "movq (%rax),%rax");
2099 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2101 EMIT_ASM (amd64_if_goto
,
2105 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2113 amd64_emit_goto (int *offset_p
, int *size_p
)
2115 EMIT_ASM (amd64_goto
,
2116 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2124 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2126 int diff
= (to
- (from
+ size
));
2127 unsigned char buf
[sizeof (int)];
2135 memcpy (buf
, &diff
, sizeof (int));
2136 write_inferior_memory (from
, buf
, sizeof (int));
2140 amd64_emit_const (LONGEST num
)
2142 unsigned char buf
[16];
2144 CORE_ADDR buildaddr
= current_insn_ptr
;
2147 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2148 memcpy (&buf
[i
], &num
, sizeof (num
));
2150 append_insns (&buildaddr
, i
, buf
);
2151 current_insn_ptr
= buildaddr
;
2155 amd64_emit_call (CORE_ADDR fn
)
2157 unsigned char buf
[16];
2159 CORE_ADDR buildaddr
;
2162 /* The destination function being in the shared library, may be
2163 >31-bits away off the compiled code pad. */
2165 buildaddr
= current_insn_ptr
;
2167 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2171 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2173 /* Offset is too large for a call. Use callq, but that requires
2174 a register, so avoid it if possible. Use r10, since it is
2175 call-clobbered, we don't have to push/pop it. */
2176 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2178 memcpy (buf
+ i
, &fn
, 8);
2180 buf
[i
++] = 0xff; /* callq *%r10 */
2185 int offset32
= offset64
; /* we know we can't overflow here. */
2186 memcpy (buf
+ i
, &offset32
, 4);
2190 append_insns (&buildaddr
, i
, buf
);
2191 current_insn_ptr
= buildaddr
;
2195 amd64_emit_reg (int reg
)
2197 unsigned char buf
[16];
2199 CORE_ADDR buildaddr
;
2201 /* Assume raw_regs is still in %rdi. */
2202 buildaddr
= current_insn_ptr
;
2204 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2205 memcpy (&buf
[i
], ®
, sizeof (reg
));
2207 append_insns (&buildaddr
, i
, buf
);
2208 current_insn_ptr
= buildaddr
;
2209 amd64_emit_call (get_raw_reg_func_addr ());
2213 amd64_emit_pop (void)
2215 EMIT_ASM (amd64_pop
,
2220 amd64_emit_stack_flush (void)
2222 EMIT_ASM (amd64_stack_flush
,
2227 amd64_emit_zero_ext (int arg
)
2232 EMIT_ASM (amd64_zero_ext_8
,
2236 EMIT_ASM (amd64_zero_ext_16
,
2237 "and $0xffff,%rax");
2240 EMIT_ASM (amd64_zero_ext_32
,
2241 "mov $0xffffffff,%rcx\n\t"
2250 amd64_emit_swap (void)
2252 EMIT_ASM (amd64_swap
,
2259 amd64_emit_stack_adjust (int n
)
2261 unsigned char buf
[16];
2263 CORE_ADDR buildaddr
= current_insn_ptr
;
2266 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2270 /* This only handles adjustments up to 16, but we don't expect any more. */
2272 append_insns (&buildaddr
, i
, buf
);
2273 current_insn_ptr
= buildaddr
;
2276 /* FN's prototype is `LONGEST(*fn)(int)'. */
2279 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2281 unsigned char buf
[16];
2283 CORE_ADDR buildaddr
;
2285 buildaddr
= current_insn_ptr
;
2287 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2288 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2290 append_insns (&buildaddr
, i
, buf
);
2291 current_insn_ptr
= buildaddr
;
2292 amd64_emit_call (fn
);
2295 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2298 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2300 unsigned char buf
[16];
2302 CORE_ADDR buildaddr
;
2304 buildaddr
= current_insn_ptr
;
2306 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2307 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2309 append_insns (&buildaddr
, i
, buf
);
2310 current_insn_ptr
= buildaddr
;
2311 EMIT_ASM (amd64_void_call_2_a
,
2312 /* Save away a copy of the stack top. */
2314 /* Also pass top as the second argument. */
2316 amd64_emit_call (fn
);
2317 EMIT_ASM (amd64_void_call_2_b
,
2318 /* Restore the stack top, %rax may have been trashed. */
2323 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2326 "cmp %rax,(%rsp)\n\t"
2327 "jne .Lamd64_eq_fallthru\n\t"
2328 "lea 0x8(%rsp),%rsp\n\t"
2330 /* jmp, but don't trust the assembler to choose the right jump */
2331 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2332 ".Lamd64_eq_fallthru:\n\t"
2333 "lea 0x8(%rsp),%rsp\n\t"
2343 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2346 "cmp %rax,(%rsp)\n\t"
2347 "je .Lamd64_ne_fallthru\n\t"
2348 "lea 0x8(%rsp),%rsp\n\t"
2350 /* jmp, but don't trust the assembler to choose the right jump */
2351 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2352 ".Lamd64_ne_fallthru:\n\t"
2353 "lea 0x8(%rsp),%rsp\n\t"
2363 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2366 "cmp %rax,(%rsp)\n\t"
2367 "jnl .Lamd64_lt_fallthru\n\t"
2368 "lea 0x8(%rsp),%rsp\n\t"
2370 /* jmp, but don't trust the assembler to choose the right jump */
2371 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2372 ".Lamd64_lt_fallthru:\n\t"
2373 "lea 0x8(%rsp),%rsp\n\t"
2383 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2386 "cmp %rax,(%rsp)\n\t"
2387 "jnle .Lamd64_le_fallthru\n\t"
2388 "lea 0x8(%rsp),%rsp\n\t"
2390 /* jmp, but don't trust the assembler to choose the right jump */
2391 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2392 ".Lamd64_le_fallthru:\n\t"
2393 "lea 0x8(%rsp),%rsp\n\t"
2403 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2406 "cmp %rax,(%rsp)\n\t"
2407 "jng .Lamd64_gt_fallthru\n\t"
2408 "lea 0x8(%rsp),%rsp\n\t"
2410 /* jmp, but don't trust the assembler to choose the right jump */
2411 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2412 ".Lamd64_gt_fallthru:\n\t"
2413 "lea 0x8(%rsp),%rsp\n\t"
2423 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2426 "cmp %rax,(%rsp)\n\t"
2427 "jnge .Lamd64_ge_fallthru\n\t"
2428 ".Lamd64_ge_jump:\n\t"
2429 "lea 0x8(%rsp),%rsp\n\t"
2431 /* jmp, but don't trust the assembler to choose the right jump */
2432 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2433 ".Lamd64_ge_fallthru:\n\t"
2434 "lea 0x8(%rsp),%rsp\n\t"
2443 struct emit_ops amd64_emit_ops
=
2445 amd64_emit_prologue
,
2446 amd64_emit_epilogue
,
2451 amd64_emit_rsh_signed
,
2452 amd64_emit_rsh_unsigned
,
2460 amd64_emit_less_signed
,
2461 amd64_emit_less_unsigned
,
2465 amd64_write_goto_address
,
2470 amd64_emit_stack_flush
,
2471 amd64_emit_zero_ext
,
2473 amd64_emit_stack_adjust
,
2474 amd64_emit_int_call_1
,
2475 amd64_emit_void_call_2
,
2484 #endif /* __x86_64__ */
2487 i386_emit_prologue (void)
2489 EMIT_ASM32 (i386_prologue
,
2493 /* At this point, the raw regs base address is at 8(%ebp), and the
2494 value pointer is at 12(%ebp). */
2498 i386_emit_epilogue (void)
2500 EMIT_ASM32 (i386_epilogue
,
2501 "mov 12(%ebp),%ecx\n\t"
2502 "mov %eax,(%ecx)\n\t"
2503 "mov %ebx,0x4(%ecx)\n\t"
2511 i386_emit_add (void)
2513 EMIT_ASM32 (i386_add
,
2514 "add (%esp),%eax\n\t"
2515 "adc 0x4(%esp),%ebx\n\t"
2516 "lea 0x8(%esp),%esp");
2520 i386_emit_sub (void)
2522 EMIT_ASM32 (i386_sub
,
2523 "subl %eax,(%esp)\n\t"
2524 "sbbl %ebx,4(%esp)\n\t"
2530 i386_emit_mul (void)
2536 i386_emit_lsh (void)
2542 i386_emit_rsh_signed (void)
2548 i386_emit_rsh_unsigned (void)
2554 i386_emit_ext (int arg
)
2559 EMIT_ASM32 (i386_ext_8
,
2562 "movl %eax,%ebx\n\t"
2566 EMIT_ASM32 (i386_ext_16
,
2568 "movl %eax,%ebx\n\t"
2572 EMIT_ASM32 (i386_ext_32
,
2573 "movl %eax,%ebx\n\t"
2582 i386_emit_log_not (void)
2584 EMIT_ASM32 (i386_log_not
,
2586 "test %eax,%eax\n\t"
2593 i386_emit_bit_and (void)
2595 EMIT_ASM32 (i386_and
,
2596 "and (%esp),%eax\n\t"
2597 "and 0x4(%esp),%ebx\n\t"
2598 "lea 0x8(%esp),%esp");
2602 i386_emit_bit_or (void)
2604 EMIT_ASM32 (i386_or
,
2605 "or (%esp),%eax\n\t"
2606 "or 0x4(%esp),%ebx\n\t"
2607 "lea 0x8(%esp),%esp");
2611 i386_emit_bit_xor (void)
2613 EMIT_ASM32 (i386_xor
,
2614 "xor (%esp),%eax\n\t"
2615 "xor 0x4(%esp),%ebx\n\t"
2616 "lea 0x8(%esp),%esp");
2620 i386_emit_bit_not (void)
2622 EMIT_ASM32 (i386_bit_not
,
2623 "xor $0xffffffff,%eax\n\t"
2624 "xor $0xffffffff,%ebx\n\t");
2628 i386_emit_equal (void)
2630 EMIT_ASM32 (i386_equal
,
2631 "cmpl %ebx,4(%esp)\n\t"
2632 "jne .Li386_equal_false\n\t"
2633 "cmpl %eax,(%esp)\n\t"
2634 "je .Li386_equal_true\n\t"
2635 ".Li386_equal_false:\n\t"
2637 "jmp .Li386_equal_end\n\t"
2638 ".Li386_equal_true:\n\t"
2640 ".Li386_equal_end:\n\t"
2642 "lea 0x8(%esp),%esp");
2646 i386_emit_less_signed (void)
2648 EMIT_ASM32 (i386_less_signed
,
2649 "cmpl %ebx,4(%esp)\n\t"
2650 "jl .Li386_less_signed_true\n\t"
2651 "jne .Li386_less_signed_false\n\t"
2652 "cmpl %eax,(%esp)\n\t"
2653 "jl .Li386_less_signed_true\n\t"
2654 ".Li386_less_signed_false:\n\t"
2656 "jmp .Li386_less_signed_end\n\t"
2657 ".Li386_less_signed_true:\n\t"
2659 ".Li386_less_signed_end:\n\t"
2661 "lea 0x8(%esp),%esp");
2665 i386_emit_less_unsigned (void)
2667 EMIT_ASM32 (i386_less_unsigned
,
2668 "cmpl %ebx,4(%esp)\n\t"
2669 "jb .Li386_less_unsigned_true\n\t"
2670 "jne .Li386_less_unsigned_false\n\t"
2671 "cmpl %eax,(%esp)\n\t"
2672 "jb .Li386_less_unsigned_true\n\t"
2673 ".Li386_less_unsigned_false:\n\t"
2675 "jmp .Li386_less_unsigned_end\n\t"
2676 ".Li386_less_unsigned_true:\n\t"
2678 ".Li386_less_unsigned_end:\n\t"
2680 "lea 0x8(%esp),%esp");
2684 i386_emit_ref (int size
)
2689 EMIT_ASM32 (i386_ref1
,
2693 EMIT_ASM32 (i386_ref2
,
2697 EMIT_ASM32 (i386_ref4
,
2698 "movl (%eax),%eax");
2701 EMIT_ASM32 (i386_ref8
,
2702 "movl 4(%eax),%ebx\n\t"
2703 "movl (%eax),%eax");
2709 i386_emit_if_goto (int *offset_p
, int *size_p
)
2711 EMIT_ASM32 (i386_if_goto
,
2717 /* Don't trust the assembler to choose the right jump */
2718 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2721 *offset_p
= 11; /* be sure that this matches the sequence above */
2727 i386_emit_goto (int *offset_p
, int *size_p
)
2729 EMIT_ASM32 (i386_goto
,
2730 /* Don't trust the assembler to choose the right jump */
2731 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2739 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2741 int diff
= (to
- (from
+ size
));
2742 unsigned char buf
[sizeof (int)];
2744 /* We're only doing 4-byte sizes at the moment. */
2751 memcpy (buf
, &diff
, sizeof (int));
2752 write_inferior_memory (from
, buf
, sizeof (int));
2756 i386_emit_const (LONGEST num
)
2758 unsigned char buf
[16];
2760 CORE_ADDR buildaddr
= current_insn_ptr
;
2763 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2764 lo
= num
& 0xffffffff;
2765 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2767 hi
= ((num
>> 32) & 0xffffffff);
2770 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2771 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2776 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2778 append_insns (&buildaddr
, i
, buf
);
2779 current_insn_ptr
= buildaddr
;
2783 i386_emit_call (CORE_ADDR fn
)
2785 unsigned char buf
[16];
2787 CORE_ADDR buildaddr
;
2789 buildaddr
= current_insn_ptr
;
2791 buf
[i
++] = 0xe8; /* call <reladdr> */
2792 offset
= ((int) fn
) - (buildaddr
+ 5);
2793 memcpy (buf
+ 1, &offset
, 4);
2794 append_insns (&buildaddr
, 5, buf
);
2795 current_insn_ptr
= buildaddr
;
2799 i386_emit_reg (int reg
)
2801 unsigned char buf
[16];
2803 CORE_ADDR buildaddr
;
2805 EMIT_ASM32 (i386_reg_a
,
2807 buildaddr
= current_insn_ptr
;
2809 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2810 memcpy (&buf
[i
], ®
, sizeof (reg
));
2812 append_insns (&buildaddr
, i
, buf
);
2813 current_insn_ptr
= buildaddr
;
2814 EMIT_ASM32 (i386_reg_b
,
2815 "mov %eax,4(%esp)\n\t"
2816 "mov 8(%ebp),%eax\n\t"
2818 i386_emit_call (get_raw_reg_func_addr ());
2819 EMIT_ASM32 (i386_reg_c
,
2821 "lea 0x8(%esp),%esp");
2825 i386_emit_pop (void)
2827 EMIT_ASM32 (i386_pop
,
2833 i386_emit_stack_flush (void)
2835 EMIT_ASM32 (i386_stack_flush
,
2841 i386_emit_zero_ext (int arg
)
2846 EMIT_ASM32 (i386_zero_ext_8
,
2847 "and $0xff,%eax\n\t"
2851 EMIT_ASM32 (i386_zero_ext_16
,
2852 "and $0xffff,%eax\n\t"
2856 EMIT_ASM32 (i386_zero_ext_32
,
2865 i386_emit_swap (void)
2867 EMIT_ASM32 (i386_swap
,
2877 i386_emit_stack_adjust (int n
)
2879 unsigned char buf
[16];
2881 CORE_ADDR buildaddr
= current_insn_ptr
;
2884 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2888 append_insns (&buildaddr
, i
, buf
);
2889 current_insn_ptr
= buildaddr
;
2892 /* FN's prototype is `LONGEST(*fn)(int)'. */
2895 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2897 unsigned char buf
[16];
2899 CORE_ADDR buildaddr
;
2901 EMIT_ASM32 (i386_int_call_1_a
,
2902 /* Reserve a bit of stack space. */
2904 /* Put the one argument on the stack. */
2905 buildaddr
= current_insn_ptr
;
2907 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2910 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2912 append_insns (&buildaddr
, i
, buf
);
2913 current_insn_ptr
= buildaddr
;
2914 i386_emit_call (fn
);
2915 EMIT_ASM32 (i386_int_call_1_c
,
2917 "lea 0x8(%esp),%esp");
2920 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2923 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2925 unsigned char buf
[16];
2927 CORE_ADDR buildaddr
;
2929 EMIT_ASM32 (i386_void_call_2_a
,
2930 /* Preserve %eax only; we don't have to worry about %ebx. */
2932 /* Reserve a bit of stack space for arguments. */
2933 "sub $0x10,%esp\n\t"
2934 /* Copy "top" to the second argument position. (Note that
2935 we can't assume function won't scribble on its
2936 arguments, so don't try to restore from this.) */
2937 "mov %eax,4(%esp)\n\t"
2938 "mov %ebx,8(%esp)");
2939 /* Put the first argument on the stack. */
2940 buildaddr
= current_insn_ptr
;
2942 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2945 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2947 append_insns (&buildaddr
, i
, buf
);
2948 current_insn_ptr
= buildaddr
;
2949 i386_emit_call (fn
);
2950 EMIT_ASM32 (i386_void_call_2_b
,
2951 "lea 0x10(%esp),%esp\n\t"
2952 /* Restore original stack top. */
2958 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2961 /* Check low half first, more likely to be decider */
2962 "cmpl %eax,(%esp)\n\t"
2963 "jne .Leq_fallthru\n\t"
2964 "cmpl %ebx,4(%esp)\n\t"
2965 "jne .Leq_fallthru\n\t"
2966 "lea 0x8(%esp),%esp\n\t"
2969 /* jmp, but don't trust the assembler to choose the right jump */
2970 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2971 ".Leq_fallthru:\n\t"
2972 "lea 0x8(%esp),%esp\n\t"
2983 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2986 /* Check low half first, more likely to be decider */
2987 "cmpl %eax,(%esp)\n\t"
2989 "cmpl %ebx,4(%esp)\n\t"
2990 "je .Lne_fallthru\n\t"
2992 "lea 0x8(%esp),%esp\n\t"
2995 /* jmp, but don't trust the assembler to choose the right jump */
2996 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2997 ".Lne_fallthru:\n\t"
2998 "lea 0x8(%esp),%esp\n\t"
3009 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3012 "cmpl %ebx,4(%esp)\n\t"
3014 "jne .Llt_fallthru\n\t"
3015 "cmpl %eax,(%esp)\n\t"
3016 "jnl .Llt_fallthru\n\t"
3018 "lea 0x8(%esp),%esp\n\t"
3021 /* jmp, but don't trust the assembler to choose the right jump */
3022 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3023 ".Llt_fallthru:\n\t"
3024 "lea 0x8(%esp),%esp\n\t"
3035 i386_emit_le_goto (int *offset_p
, int *size_p
)
3038 "cmpl %ebx,4(%esp)\n\t"
3040 "jne .Lle_fallthru\n\t"
3041 "cmpl %eax,(%esp)\n\t"
3042 "jnle .Lle_fallthru\n\t"
3044 "lea 0x8(%esp),%esp\n\t"
3047 /* jmp, but don't trust the assembler to choose the right jump */
3048 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3049 ".Lle_fallthru:\n\t"
3050 "lea 0x8(%esp),%esp\n\t"
3061 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3064 "cmpl %ebx,4(%esp)\n\t"
3066 "jne .Lgt_fallthru\n\t"
3067 "cmpl %eax,(%esp)\n\t"
3068 "jng .Lgt_fallthru\n\t"
3070 "lea 0x8(%esp),%esp\n\t"
3073 /* jmp, but don't trust the assembler to choose the right jump */
3074 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3075 ".Lgt_fallthru:\n\t"
3076 "lea 0x8(%esp),%esp\n\t"
3087 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3090 "cmpl %ebx,4(%esp)\n\t"
3092 "jne .Lge_fallthru\n\t"
3093 "cmpl %eax,(%esp)\n\t"
3094 "jnge .Lge_fallthru\n\t"
3096 "lea 0x8(%esp),%esp\n\t"
3099 /* jmp, but don't trust the assembler to choose the right jump */
3100 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3101 ".Lge_fallthru:\n\t"
3102 "lea 0x8(%esp),%esp\n\t"
3112 struct emit_ops i386_emit_ops
=
3120 i386_emit_rsh_signed
,
3121 i386_emit_rsh_unsigned
,
3129 i386_emit_less_signed
,
3130 i386_emit_less_unsigned
,
3134 i386_write_goto_address
,
3139 i386_emit_stack_flush
,
3142 i386_emit_stack_adjust
,
3143 i386_emit_int_call_1
,
3144 i386_emit_void_call_2
,
static struct emit_ops *
x86_emit_ops (void)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  NULL, /* fetch_register */
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_get_min_fast_tracepoint_insn_len,