1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
34 #include "tracepoint.h"
37 /* Defined in auto-generated file amd64-linux.c. */
38 void init_registers_amd64_linux (void);
39 extern const struct target_desc
*tdesc_amd64_linux
;
41 /* Defined in auto-generated file amd64-avx-linux.c. */
42 void init_registers_amd64_avx_linux (void);
43 extern const struct target_desc
*tdesc_amd64_avx_linux
;
45 /* Defined in auto-generated file x32-linux.c. */
46 void init_registers_x32_linux (void);
47 extern const struct target_desc
*tdesc_x32_linux
;
49 /* Defined in auto-generated file x32-avx-linux.c. */
50 void init_registers_x32_avx_linux (void);
51 extern const struct target_desc
*tdesc_x32_avx_linux
;
54 /* Defined in auto-generated file i386-linux.c. */
55 void init_registers_i386_linux (void);
56 extern const struct target_desc
*tdesc_i386_linux
;
58 /* Defined in auto-generated file i386-mmx-linux.c. */
59 void init_registers_i386_mmx_linux (void);
60 extern const struct target_desc
*tdesc_i386_mmx_linux
;
62 /* Defined in auto-generated file i386-avx-linux.c. */
63 void init_registers_i386_avx_linux (void);
64 extern const struct target_desc
*tdesc_i386_avx_linux
;
67 static struct target_desc
*tdesc_amd64_linux_no_xml
;
69 static struct target_desc
*tdesc_i386_linux_no_xml
;
/* Templates for the relocated jump instructions written into a fast
   tracepoint's jump pad: a 5-byte rel32 jmp and a 4-byte rel16 jmp.
   The displacement bytes are patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
90 #include <sys/procfs.h>
91 #include <sys/ptrace.h>
94 #ifndef PTRACE_GETREGSET
95 #define PTRACE_GETREGSET 0x4204
98 #ifndef PTRACE_SETREGSET
99 #define PTRACE_SETREGSET 0x4205
103 #ifndef PTRACE_GET_THREAD_AREA
104 #define PTRACE_GET_THREAD_AREA 25
107 /* This definition comes from prctl.h, but some kernels may not have it. */
108 #ifndef PTRACE_ARCH_PRCTL
109 #define PTRACE_ARCH_PRCTL 30
112 /* The following definitions come from prctl.h, but may be absent
113 for certain configurations. */
115 #define ARCH_SET_GS 0x1001
116 #define ARCH_SET_FS 0x1002
117 #define ARCH_GET_FS 0x1003
118 #define ARCH_GET_GS 0x1004
121 /* Per-process arch-specific data we want to keep. */
123 struct arch_process_info
125 struct i386_debug_reg_state debug_reg_state
;
128 /* Per-thread arch-specific data we want to keep. */
132 /* Non-zero if our copy differs from what's recorded in the thread. */
133 int debug_registers_changed
;
138 /* Mapping between the general-purpose registers in `struct user'
139 format and GDB's register array layout.
140 Note that the transfer layout uses 64-bit regs. */
141 static /*const*/ int i386_regmap
[] =
143 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
144 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
145 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
146 DS
* 8, ES
* 8, FS
* 8, GS
* 8
149 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
151 /* So code below doesn't have to care, i386 or amd64. */
152 #define ORIG_EAX ORIG_RAX
154 static const int x86_64_regmap
[] =
156 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
157 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
158 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
159 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
160 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
161 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
162 -1, -1, -1, -1, -1, -1, -1, -1,
163 -1, -1, -1, -1, -1, -1, -1, -1,
164 -1, -1, -1, -1, -1, -1, -1, -1,
165 -1, -1, -1, -1, -1, -1, -1, -1, -1,
169 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
171 #else /* ! __x86_64__ */
173 /* Mapping between the general-purpose registers in `struct user'
174 format and GDB's register array layout. */
175 static /*const*/ int i386_regmap
[] =
177 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
178 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
179 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
180 DS
* 4, ES
* 4, FS
* 4, GS
* 4
183 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
189 /* Returns true if the current inferior belongs to a x86-64 process,
193 is_64bit_tdesc (void)
195 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
197 return register_size (regcache
->tdesc
, 0) == 8;
203 /* Called by libthread_db. */
206 ps_get_thread_area (const struct ps_prochandle
*ph
,
207 lwpid_t lwpid
, int idx
, void **base
)
210 int use_64bit
= is_64bit_tdesc ();
217 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
221 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
232 unsigned int desc
[4];
234 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
235 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
238 /* Ensure we properly extend the value to 64-bits for x86_64. */
239 *base
= (void *) (uintptr_t) desc
[1];
244 /* Get the thread area address. This is used to recognize which
245 thread is which when tracing with the in-process agent library. We
246 don't read anything from the address, and treat it as opaque; it's
247 the address itself that we assume is unique per-thread. */
250 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
253 int use_64bit
= is_64bit_tdesc ();
258 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
260 *addr
= (CORE_ADDR
) (uintptr_t) base
;
269 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
270 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
271 unsigned int desc
[4];
273 const int reg_thread_area
= 3; /* bits to scale down register value. */
276 collect_register_by_name (regcache
, "gs", &gs
);
278 idx
= gs
>> reg_thread_area
;
280 if (ptrace (PTRACE_GET_THREAD_AREA
,
282 (void *) (long) idx
, (unsigned long) &desc
) < 0)
293 x86_cannot_store_register (int regno
)
296 if (is_64bit_tdesc ())
300 return regno
>= I386_NUM_REGS
;
304 x86_cannot_fetch_register (int regno
)
307 if (is_64bit_tdesc ())
311 return regno
>= I386_NUM_REGS
;
315 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
320 if (register_size (regcache
->tdesc
, 0) == 8)
322 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
323 if (x86_64_regmap
[i
] != -1)
324 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
329 for (i
= 0; i
< I386_NUM_REGS
; i
++)
330 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
332 collect_register_by_name (regcache
, "orig_eax",
333 ((char *) buf
) + ORIG_EAX
* 4);
337 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
342 if (register_size (regcache
->tdesc
, 0) == 8)
344 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
345 if (x86_64_regmap
[i
] != -1)
346 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
351 for (i
= 0; i
< I386_NUM_REGS
; i
++)
352 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
354 supply_register_by_name (regcache
, "orig_eax",
355 ((char *) buf
) + ORIG_EAX
* 4);
/* Convert the regcache's FP state into the ptrace FP buffer BUF
   (fxsave layout on 64-bit, fsave layout on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Load the regcache's FP state from the ptrace FP buffer BUF.
   Inverse of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
381 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
383 i387_cache_to_fxsave (regcache
, buf
);
387 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
389 i387_fxsave_to_cache (regcache
, buf
);
/* Convert the regcache's extended (XSAVE) state into BUF for
   PTRACE_SETREGSET with NT_X86_XSTATE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Load the regcache's extended (XSAVE) state from BUF as returned by
   PTRACE_GETREGSET with NT_X86_XSTATE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
406 /* ??? The non-biarch i386 case stores all the i387 regs twice.
407 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
408 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
409 doesn't work. IWBN to avoid the duplication in the case where it
410 does work. Maybe the arch_setup routine could check whether it works
411 and update the supported regsets accordingly. */
413 static struct regset_info x86_regsets
[] =
415 #ifdef HAVE_PTRACE_GETREGS
416 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
418 x86_fill_gregset
, x86_store_gregset
},
419 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
420 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
422 # ifdef HAVE_PTRACE_GETFPXREGS
423 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
425 x86_fill_fpxregset
, x86_store_fpxregset
},
428 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
430 x86_fill_fpregset
, x86_store_fpregset
},
431 #endif /* HAVE_PTRACE_GETREGS */
432 { 0, 0, 0, -1, -1, NULL
, NULL
}
436 x86_get_pc (struct regcache
*regcache
)
438 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
443 collect_register_by_name (regcache
, "rip", &pc
);
444 return (CORE_ADDR
) pc
;
449 collect_register_by_name (regcache
, "eip", &pc
);
450 return (CORE_ADDR
) pc
;
455 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
457 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
461 unsigned long newpc
= pc
;
462 supply_register_by_name (regcache
, "rip", &newpc
);
466 unsigned int newpc
= pc
;
467 supply_register_by_name (regcache
, "eip", &newpc
);
/* The single-byte int3 instruction used as the software breakpoint.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
475 x86_breakpoint_at (CORE_ADDR pc
)
479 (*the_target
->read_memory
) (pc
, &c
, 1);
486 /* Support for debug registers. */
489 x86_linux_dr_get (ptid_t ptid
, int regnum
)
494 tid
= ptid_get_lwp (ptid
);
497 value
= ptrace (PTRACE_PEEKUSER
, tid
,
498 offsetof (struct user
, u_debugreg
[regnum
]), 0);
500 error ("Couldn't read debug register");
506 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
510 tid
= ptid_get_lwp (ptid
);
513 ptrace (PTRACE_POKEUSER
, tid
,
514 offsetof (struct user
, u_debugreg
[regnum
]), value
);
516 error ("Couldn't write debug register");
520 update_debug_registers_callback (struct inferior_list_entry
*entry
,
523 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
524 int pid
= *(int *) pid_p
;
526 /* Only update the threads of this process. */
527 if (pid_of (lwp
) == pid
)
529 /* The actual update is done later just before resuming the lwp,
530 we just mark that the registers need updating. */
531 lwp
->arch_private
->debug_registers_changed
= 1;
533 /* If the lwp isn't stopped, force it to momentarily pause, so
534 we can update its debug registers. */
536 linux_stop_lwp (lwp
);
542 /* Update the inferior's debug register REGNUM from STATE. */
545 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
547 /* Only update the threads of this process. */
548 int pid
= pid_of (get_thread_lwp (current_inferior
));
550 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
551 fatal ("Invalid debug register %d", regnum
);
553 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
556 /* Return the inferior's debug register REGNUM. */
559 i386_dr_low_get_addr (int regnum
)
561 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
562 ptid_t ptid
= ptid_of (lwp
);
564 /* DR6 and DR7 are retrieved with some other way. */
565 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
567 return x86_linux_dr_get (ptid
, regnum
);
570 /* Update the inferior's DR7 debug control register from STATE. */
573 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
575 /* Only update the threads of this process. */
576 int pid
= pid_of (get_thread_lwp (current_inferior
));
578 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
581 /* Return the inferior's DR7 debug control register. */
584 i386_dr_low_get_control (void)
586 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
587 ptid_t ptid
= ptid_of (lwp
);
589 return x86_linux_dr_get (ptid
, DR_CONTROL
);
592 /* Get the value of the DR6 debug status register from the inferior
593 and record it in STATE. */
596 i386_dr_low_get_status (void)
598 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
599 ptid_t ptid
= ptid_of (lwp
);
601 return x86_linux_dr_get (ptid
, DR_STATUS
);
604 /* Breakpoint/Watchpoint support. */
607 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
609 struct process_info
*proc
= current_process ();
612 case '0': /* software-breakpoint */
616 ret
= prepare_to_access_memory ();
619 ret
= set_gdb_breakpoint_at (addr
);
620 done_accessing_memory ();
623 case '1': /* hardware-breakpoint */
624 case '2': /* write watchpoint */
625 case '3': /* read watchpoint */
626 case '4': /* access watchpoint */
627 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
637 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
639 struct process_info
*proc
= current_process ();
642 case '0': /* software-breakpoint */
646 ret
= prepare_to_access_memory ();
649 ret
= delete_gdb_breakpoint_at (addr
);
650 done_accessing_memory ();
653 case '1': /* hardware-breakpoint */
654 case '2': /* write watchpoint */
655 case '3': /* read watchpoint */
656 case '4': /* access watchpoint */
657 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
666 x86_stopped_by_watchpoint (void)
668 struct process_info
*proc
= current_process ();
669 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
673 x86_stopped_data_address (void)
675 struct process_info
*proc
= current_process ();
677 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
683 /* Called when a new process is created. */
685 static struct arch_process_info
*
686 x86_linux_new_process (void)
688 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
690 i386_low_init_dregs (&info
->debug_reg_state
);
695 /* Called when a new thread is detected. */
697 static struct arch_lwp_info
*
698 x86_linux_new_thread (void)
700 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
702 info
->debug_registers_changed
= 1;
707 /* Called when resuming a thread.
708 If the debug regs have changed, update the thread's copies. */
711 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
713 ptid_t ptid
= ptid_of (lwp
);
714 int clear_status
= 0;
716 if (lwp
->arch_private
->debug_registers_changed
)
719 int pid
= ptid_get_pid (ptid
);
720 struct process_info
*proc
= find_process_pid (pid
);
721 struct i386_debug_reg_state
*state
722 = &proc
->private->arch_private
->debug_reg_state
;
724 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
725 if (state
->dr_ref_count
[i
] > 0)
727 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
729 /* If we're setting a watchpoint, any change the inferior
730 had done itself to the debug registers needs to be
731 discarded, otherwise, i386_low_stopped_data_address can
736 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
738 lwp
->arch_private
->debug_registers_changed
= 0;
741 if (clear_status
|| lwp
->stopped_by_watchpoint
)
742 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
745 /* When GDBSERVER is built as a 64-bit application on linux, the
746 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
747 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
748 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
749 conversion in-place ourselves. */
751 /* These types below (compat_*) define a siginfo type that is layout
752 compatible with the siginfo type exported by the 32-bit userspace
757 typedef int compat_int_t
;
758 typedef unsigned int compat_uptr_t
;
760 typedef int compat_time_t
;
761 typedef int compat_timer_t
;
762 typedef int compat_clock_t
;
764 struct compat_timeval
766 compat_time_t tv_sec
;
770 typedef union compat_sigval
772 compat_int_t sival_int
;
773 compat_uptr_t sival_ptr
;
776 typedef struct compat_siginfo
784 int _pad
[((128 / sizeof (int)) - 3)];
793 /* POSIX.1b timers */
798 compat_sigval_t _sigval
;
801 /* POSIX.1b signals */
806 compat_sigval_t _sigval
;
815 compat_clock_t _utime
;
816 compat_clock_t _stime
;
819 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
834 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
835 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
837 typedef struct compat_x32_siginfo
845 int _pad
[((128 / sizeof (int)) - 3)];
854 /* POSIX.1b timers */
859 compat_sigval_t _sigval
;
862 /* POSIX.1b signals */
867 compat_sigval_t _sigval
;
876 compat_x32_clock_t _utime
;
877 compat_x32_clock_t _stime
;
880 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
893 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
895 #define cpt_si_pid _sifields._kill._pid
896 #define cpt_si_uid _sifields._kill._uid
897 #define cpt_si_timerid _sifields._timer._tid
898 #define cpt_si_overrun _sifields._timer._overrun
899 #define cpt_si_status _sifields._sigchld._status
900 #define cpt_si_utime _sifields._sigchld._utime
901 #define cpt_si_stime _sifields._sigchld._stime
902 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
903 #define cpt_si_addr _sifields._sigfault._addr
904 #define cpt_si_band _sifields._sigpoll._band
905 #define cpt_si_fd _sifields._sigpoll._fd
907 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
908 In their place is si_timer1,si_timer2. */
910 #define si_timerid si_timer1
913 #define si_overrun si_timer2
917 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
919 memset (to
, 0, sizeof (*to
));
921 to
->si_signo
= from
->si_signo
;
922 to
->si_errno
= from
->si_errno
;
923 to
->si_code
= from
->si_code
;
925 if (to
->si_code
== SI_TIMER
)
927 to
->cpt_si_timerid
= from
->si_timerid
;
928 to
->cpt_si_overrun
= from
->si_overrun
;
929 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
931 else if (to
->si_code
== SI_USER
)
933 to
->cpt_si_pid
= from
->si_pid
;
934 to
->cpt_si_uid
= from
->si_uid
;
936 else if (to
->si_code
< 0)
938 to
->cpt_si_pid
= from
->si_pid
;
939 to
->cpt_si_uid
= from
->si_uid
;
940 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
944 switch (to
->si_signo
)
947 to
->cpt_si_pid
= from
->si_pid
;
948 to
->cpt_si_uid
= from
->si_uid
;
949 to
->cpt_si_status
= from
->si_status
;
950 to
->cpt_si_utime
= from
->si_utime
;
951 to
->cpt_si_stime
= from
->si_stime
;
957 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
960 to
->cpt_si_band
= from
->si_band
;
961 to
->cpt_si_fd
= from
->si_fd
;
964 to
->cpt_si_pid
= from
->si_pid
;
965 to
->cpt_si_uid
= from
->si_uid
;
966 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
973 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
975 memset (to
, 0, sizeof (*to
));
977 to
->si_signo
= from
->si_signo
;
978 to
->si_errno
= from
->si_errno
;
979 to
->si_code
= from
->si_code
;
981 if (to
->si_code
== SI_TIMER
)
983 to
->si_timerid
= from
->cpt_si_timerid
;
984 to
->si_overrun
= from
->cpt_si_overrun
;
985 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
987 else if (to
->si_code
== SI_USER
)
989 to
->si_pid
= from
->cpt_si_pid
;
990 to
->si_uid
= from
->cpt_si_uid
;
992 else if (to
->si_code
< 0)
994 to
->si_pid
= from
->cpt_si_pid
;
995 to
->si_uid
= from
->cpt_si_uid
;
996 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1000 switch (to
->si_signo
)
1003 to
->si_pid
= from
->cpt_si_pid
;
1004 to
->si_uid
= from
->cpt_si_uid
;
1005 to
->si_status
= from
->cpt_si_status
;
1006 to
->si_utime
= from
->cpt_si_utime
;
1007 to
->si_stime
= from
->cpt_si_stime
;
1013 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1016 to
->si_band
= from
->cpt_si_band
;
1017 to
->si_fd
= from
->cpt_si_fd
;
1020 to
->si_pid
= from
->cpt_si_pid
;
1021 to
->si_uid
= from
->cpt_si_uid
;
1022 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1029 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1032 memset (to
, 0, sizeof (*to
));
1034 to
->si_signo
= from
->si_signo
;
1035 to
->si_errno
= from
->si_errno
;
1036 to
->si_code
= from
->si_code
;
1038 if (to
->si_code
== SI_TIMER
)
1040 to
->cpt_si_timerid
= from
->si_timerid
;
1041 to
->cpt_si_overrun
= from
->si_overrun
;
1042 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1044 else if (to
->si_code
== SI_USER
)
1046 to
->cpt_si_pid
= from
->si_pid
;
1047 to
->cpt_si_uid
= from
->si_uid
;
1049 else if (to
->si_code
< 0)
1051 to
->cpt_si_pid
= from
->si_pid
;
1052 to
->cpt_si_uid
= from
->si_uid
;
1053 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1057 switch (to
->si_signo
)
1060 to
->cpt_si_pid
= from
->si_pid
;
1061 to
->cpt_si_uid
= from
->si_uid
;
1062 to
->cpt_si_status
= from
->si_status
;
1063 to
->cpt_si_utime
= from
->si_utime
;
1064 to
->cpt_si_stime
= from
->si_stime
;
1070 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1073 to
->cpt_si_band
= from
->si_band
;
1074 to
->cpt_si_fd
= from
->si_fd
;
1077 to
->cpt_si_pid
= from
->si_pid
;
1078 to
->cpt_si_uid
= from
->si_uid
;
1079 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1086 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1087 compat_x32_siginfo_t
*from
)
1089 memset (to
, 0, sizeof (*to
));
1091 to
->si_signo
= from
->si_signo
;
1092 to
->si_errno
= from
->si_errno
;
1093 to
->si_code
= from
->si_code
;
1095 if (to
->si_code
== SI_TIMER
)
1097 to
->si_timerid
= from
->cpt_si_timerid
;
1098 to
->si_overrun
= from
->cpt_si_overrun
;
1099 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1101 else if (to
->si_code
== SI_USER
)
1103 to
->si_pid
= from
->cpt_si_pid
;
1104 to
->si_uid
= from
->cpt_si_uid
;
1106 else if (to
->si_code
< 0)
1108 to
->si_pid
= from
->cpt_si_pid
;
1109 to
->si_uid
= from
->cpt_si_uid
;
1110 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1114 switch (to
->si_signo
)
1117 to
->si_pid
= from
->cpt_si_pid
;
1118 to
->si_uid
= from
->cpt_si_uid
;
1119 to
->si_status
= from
->cpt_si_status
;
1120 to
->si_utime
= from
->cpt_si_utime
;
1121 to
->si_stime
= from
->cpt_si_stime
;
1127 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1130 to
->si_band
= from
->cpt_si_band
;
1131 to
->si_fd
= from
->cpt_si_fd
;
1134 to
->si_pid
= from
->cpt_si_pid
;
1135 to
->si_uid
= from
->cpt_si_uid
;
1136 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1142 #endif /* __x86_64__ */
1144 /* Convert a native/host siginfo object, into/from the siginfo in the
1145 layout of the inferiors' architecture. Returns true if any
1146 conversion was done; false otherwise. If DIRECTION is 1, then copy
1147 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1151 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1154 unsigned int machine
;
1155 int tid
= lwpid_of (get_thread_lwp (current_inferior
));
1156 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1158 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1159 if (!is_64bit_tdesc ())
1161 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1162 fatal ("unexpected difference in siginfo");
1165 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1167 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1171 /* No fixup for native x32 GDB. */
1172 else if (!is_elf64
&& sizeof (void *) == 8)
1174 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1175 fatal ("unexpected difference in siginfo");
1178 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1181 siginfo_from_compat_x32_siginfo (native
,
1182 (struct compat_x32_siginfo
*) inf
);
1193 /* Format of XSAVE extended state is:
1196 fxsave_bytes[0..463]
1197 sw_usable_bytes[464..511]
1198 xstate_hdr_bytes[512..575]
1203 Same memory layout will be used for the coredump NT_X86_XSTATE
1204 representing the XSAVE extended state registers.
1206 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1207 extended state mask, which is the same as the extended control register
1208 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1209 together with the mask saved in the xstate_hdr_bytes to determine what
1210 states the processor/OS supports and what state, used or initialized,
1211 the process/thread is in. */
1212 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1214 /* Does the current host support the GETFPXREGS request? The header
1215 file may or may not define it, and even if it is defined, the
1216 kernel will return EIO if it's running on a pre-SSE processor. */
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "probe at runtime"; 0 means "known unsupported".  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 = unknown.  */
static int have_ptrace_getregset = -1;
1228 /* Get Linux/x86 target description from running target. */
1230 static const struct target_desc
*
1231 x86_linux_read_description (void)
1233 unsigned int machine
;
1237 static uint64_t xcr0
;
1238 struct regset_info
*regset
;
1240 tid
= lwpid_of (get_thread_lwp (current_inferior
));
1242 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1244 if (sizeof (void *) == 4)
1247 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1249 else if (machine
== EM_X86_64
)
1250 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1254 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1255 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1257 elf_fpxregset_t fpxregs
;
1259 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1261 have_ptrace_getfpxregs
= 0;
1262 have_ptrace_getregset
= 0;
1263 return tdesc_i386_mmx_linux
;
1266 have_ptrace_getfpxregs
= 1;
1272 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1274 /* Don't use XML. */
1276 if (machine
== EM_X86_64
)
1277 return tdesc_amd64_linux_no_xml
;
1280 return tdesc_i386_linux_no_xml
;
1283 if (have_ptrace_getregset
== -1)
1285 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1288 iov
.iov_base
= xstateregs
;
1289 iov
.iov_len
= sizeof (xstateregs
);
1291 /* Check if PTRACE_GETREGSET works. */
1292 if (ptrace (PTRACE_GETREGSET
, tid
,
1293 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1294 have_ptrace_getregset
= 0;
1297 have_ptrace_getregset
= 1;
1299 /* Get XCR0 from XSAVE extended state. */
1300 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1301 / sizeof (uint64_t))];
1303 /* Use PTRACE_GETREGSET if it is available. */
1304 for (regset
= x86_regsets
;
1305 regset
->fill_function
!= NULL
; regset
++)
1306 if (regset
->get_request
== PTRACE_GETREGSET
)
1307 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1308 else if (regset
->type
!= GENERAL_REGS
)
1313 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1314 avx
= (have_ptrace_getregset
1315 && (xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
);
1317 /* AVX is the highest feature we support. */
1321 if (machine
== EM_X86_64
)
1327 return tdesc_x32_avx_linux
;
1329 return tdesc_amd64_avx_linux
;
1334 return tdesc_x32_linux
;
1336 return tdesc_amd64_linux
;
1343 return tdesc_i386_avx_linux
;
1345 return tdesc_i386_linux
;
1348 gdb_assert_not_reached ("failed to return tdesc");
1351 /* Callback for find_inferior. Stops iteration when a thread with a
1352 given PID is found. */
1355 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1357 int pid
= *(int *) data
;
1359 return (ptid_get_pid (entry
->id
) == pid
);
1362 /* Callback for for_each_inferior. Calls the arch_setup routine for
1366 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1368 int pid
= ptid_get_pid (entry
->id
);
1370 /* Look up any thread of this processes. */
1372 = (struct thread_info
*) find_inferior (&all_threads
,
1373 same_process_callback
, &pid
);
1375 the_low_target
.arch_setup ();
1378 /* Update all the target description of all processes; a new GDB
1379 connected, and it may or not support xml target descriptions. */
1382 x86_linux_update_xmltarget (void)
1384 struct thread_info
*save_inferior
= current_inferior
;
1386 /* Before changing the register cache's internal layout, flush the
1387 contents of the current valid caches back to the threads, and
1388 release the current regcache objects. */
1389 regcache_release ();
1391 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1393 current_inferior
= save_inferior
;
1396 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1397 PTRACE_GETREGSET. */
1400 x86_linux_process_qsupported (const char *query
)
1402 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1403 with "i386" in qSupported query, it supports x86 XML target
1406 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1408 char *copy
= xstrdup (query
+ 13);
1411 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1413 if (strcmp (p
, "i386") == 0)
1423 x86_linux_update_xmltarget ();
1426 /* Common for x86/x86-64. */
1428 static struct regsets_info x86_regsets_info
=
1430 x86_regsets
, /* regsets */
1431 0, /* num_regsets */
1432 NULL
, /* disabled_regsets */
1436 static struct regs_info amd64_linux_regs_info
=
1438 NULL
, /* regset_bitmap */
1439 NULL
, /* usrregs_info */
1443 static struct usrregs_info i386_linux_usrregs_info
=
1449 static struct regs_info i386_linux_regs_info
=
1451 NULL
, /* regset_bitmap */
1452 &i386_linux_usrregs_info
,
1456 const struct regs_info
*
1457 x86_linux_regs_info (void)
1460 if (is_64bit_tdesc ())
1461 return &amd64_linux_regs_info
;
1464 return &i386_linux_regs_info
;
1467 /* Initialize the target description for the architecture of the
1471 x86_arch_setup (void)
1473 current_process ()->tdesc
= x86_linux_read_description ();
1477 x86_supports_tracepoints (void)
1483 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1485 write_inferior_memory (*to
, buf
, len
);
/* Decode OP, a string of space-separated hex byte values, into BUF.
   Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* Stop at the first token that is not a hex number.  */
      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1511 /* Build a jump pad that saves registers and calls a collection
1512 function. Writes a jump instruction to the jump pad to
1513 JJUMPAD_INSN. The caller is responsible to write it in at the
1514 tracepoint address. */
1517 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1518 CORE_ADDR collector
,
1521 CORE_ADDR
*jump_entry
,
1522 CORE_ADDR
*trampoline
,
1523 ULONGEST
*trampoline_size
,
1524 unsigned char *jjump_pad_insn
,
1525 ULONGEST
*jjump_pad_insn_size
,
1526 CORE_ADDR
*adjusted_insn_addr
,
1527 CORE_ADDR
*adjusted_insn_addr_end
,
1530 unsigned char buf
[40];
1534 CORE_ADDR buildaddr
= *jump_entry
;
1536 /* Build the jump pad. */
1538 /* First, do tracepoint data collection. Save registers. */
1540 /* Need to ensure stack pointer saved first. */
1541 buf
[i
++] = 0x54; /* push %rsp */
1542 buf
[i
++] = 0x55; /* push %rbp */
1543 buf
[i
++] = 0x57; /* push %rdi */
1544 buf
[i
++] = 0x56; /* push %rsi */
1545 buf
[i
++] = 0x52; /* push %rdx */
1546 buf
[i
++] = 0x51; /* push %rcx */
1547 buf
[i
++] = 0x53; /* push %rbx */
1548 buf
[i
++] = 0x50; /* push %rax */
1549 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1550 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1551 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1552 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1553 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1554 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1555 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1556 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1557 buf
[i
++] = 0x9c; /* pushfq */
1558 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1560 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1561 i
+= sizeof (unsigned long);
1562 buf
[i
++] = 0x57; /* push %rdi */
1563 append_insns (&buildaddr
, i
, buf
);
1565 /* Stack space for the collecting_t object. */
1567 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1568 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1569 memcpy (buf
+ i
, &tpoint
, 8);
1571 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1572 i
+= push_opcode (&buf
[i
],
1573 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1574 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1575 append_insns (&buildaddr
, i
, buf
);
1579 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1580 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1582 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1583 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1584 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1585 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1586 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1587 append_insns (&buildaddr
, i
, buf
);
1589 /* Set up the gdb_collect call. */
1590 /* At this point, (stack pointer + 0x18) is the base of our saved
1594 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1595 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1597 /* tpoint address may be 64-bit wide. */
1598 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1599 memcpy (buf
+ i
, &tpoint
, 8);
1601 append_insns (&buildaddr
, i
, buf
);
1603 /* The collector function being in the shared library, may be
1604 >31-bits away off the jump pad. */
1606 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1607 memcpy (buf
+ i
, &collector
, 8);
1609 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1610 append_insns (&buildaddr
, i
, buf
);
1612 /* Clear the spin-lock. */
1614 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1615 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1616 memcpy (buf
+ i
, &lockaddr
, 8);
1618 append_insns (&buildaddr
, i
, buf
);
1620 /* Remove stack that had been used for the collect_t object. */
1622 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1623 append_insns (&buildaddr
, i
, buf
);
1625 /* Restore register state. */
1627 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1631 buf
[i
++] = 0x9d; /* popfq */
1632 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1633 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1634 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1635 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1636 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1637 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1638 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1639 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1640 buf
[i
++] = 0x58; /* pop %rax */
1641 buf
[i
++] = 0x5b; /* pop %rbx */
1642 buf
[i
++] = 0x59; /* pop %rcx */
1643 buf
[i
++] = 0x5a; /* pop %rdx */
1644 buf
[i
++] = 0x5e; /* pop %rsi */
1645 buf
[i
++] = 0x5f; /* pop %rdi */
1646 buf
[i
++] = 0x5d; /* pop %rbp */
1647 buf
[i
++] = 0x5c; /* pop %rsp */
1648 append_insns (&buildaddr
, i
, buf
);
1650 /* Now, adjust the original instruction to execute in the jump
1652 *adjusted_insn_addr
= buildaddr
;
1653 relocate_instruction (&buildaddr
, tpaddr
);
1654 *adjusted_insn_addr_end
= buildaddr
;
1656 /* Finally, write a jump back to the program. */
1658 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1659 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1662 "E.Jump back from jump pad too far from tracepoint "
1663 "(offset 0x%" PRIx64
" > int32).", loffset
);
1667 offset
= (int) loffset
;
1668 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1669 memcpy (buf
+ 1, &offset
, 4);
1670 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1672 /* The jump pad is now built. Wire in a jump to our jump pad. This
1673 is always done last (by our caller actually), so that we can
1674 install fast tracepoints with threads running. This relies on
1675 the agent's atomic write support. */
1676 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1677 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1680 "E.Jump pad too far from tracepoint "
1681 "(offset 0x%" PRIx64
" > int32).", loffset
);
1685 offset
= (int) loffset
;
1687 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1688 memcpy (buf
+ 1, &offset
, 4);
1689 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1690 *jjump_pad_insn_size
= sizeof (jump_insn
);
1692 /* Return the end address of our pad. */
1693 *jump_entry
= buildaddr
;
1698 #endif /* __x86_64__ */
1700 /* Build a jump pad that saves registers and calls a collection
1701 function. Writes a jump instruction to the jump pad to
1702 JJUMPAD_INSN. The caller is responsible to write it in at the
1703 tracepoint address. */
1706 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1707 CORE_ADDR collector
,
1710 CORE_ADDR
*jump_entry
,
1711 CORE_ADDR
*trampoline
,
1712 ULONGEST
*trampoline_size
,
1713 unsigned char *jjump_pad_insn
,
1714 ULONGEST
*jjump_pad_insn_size
,
1715 CORE_ADDR
*adjusted_insn_addr
,
1716 CORE_ADDR
*adjusted_insn_addr_end
,
1719 unsigned char buf
[0x100];
1721 CORE_ADDR buildaddr
= *jump_entry
;
1723 /* Build the jump pad. */
1725 /* First, do tracepoint data collection. Save registers. */
1727 buf
[i
++] = 0x60; /* pushad */
1728 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1729 *((int *)(buf
+ i
)) = (int) tpaddr
;
1731 buf
[i
++] = 0x9c; /* pushf */
1732 buf
[i
++] = 0x1e; /* push %ds */
1733 buf
[i
++] = 0x06; /* push %es */
1734 buf
[i
++] = 0x0f; /* push %fs */
1736 buf
[i
++] = 0x0f; /* push %gs */
1738 buf
[i
++] = 0x16; /* push %ss */
1739 buf
[i
++] = 0x0e; /* push %cs */
1740 append_insns (&buildaddr
, i
, buf
);
1742 /* Stack space for the collecting_t object. */
1744 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1746 /* Build the object. */
1747 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1748 memcpy (buf
+ i
, &tpoint
, 4);
1750 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1752 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1753 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1754 append_insns (&buildaddr
, i
, buf
);
1756 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1757 If we cared for it, this could be using xchg alternatively. */
1760 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1761 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1763 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1765 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1766 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1767 append_insns (&buildaddr
, i
, buf
);
1770 /* Set up arguments to the gdb_collect call. */
1772 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1773 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1774 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1775 append_insns (&buildaddr
, i
, buf
);
1778 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1779 append_insns (&buildaddr
, i
, buf
);
1782 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1783 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1785 append_insns (&buildaddr
, i
, buf
);
1787 buf
[0] = 0xe8; /* call <reladdr> */
1788 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1789 memcpy (buf
+ 1, &offset
, 4);
1790 append_insns (&buildaddr
, 5, buf
);
1791 /* Clean up after the call. */
1792 buf
[0] = 0x83; /* add $0x8,%esp */
1795 append_insns (&buildaddr
, 3, buf
);
1798 /* Clear the spin-lock. This would need the LOCK prefix on older
1801 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1802 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1803 memcpy (buf
+ i
, &lockaddr
, 4);
1805 append_insns (&buildaddr
, i
, buf
);
1808 /* Remove stack that had been used for the collect_t object. */
1810 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1811 append_insns (&buildaddr
, i
, buf
);
1814 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1817 buf
[i
++] = 0x17; /* pop %ss */
1818 buf
[i
++] = 0x0f; /* pop %gs */
1820 buf
[i
++] = 0x0f; /* pop %fs */
1822 buf
[i
++] = 0x07; /* pop %es */
1823 buf
[i
++] = 0x1f; /* pop %ds */
1824 buf
[i
++] = 0x9d; /* popf */
1825 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1828 buf
[i
++] = 0x61; /* popad */
1829 append_insns (&buildaddr
, i
, buf
);
1831 /* Now, adjust the original instruction to execute in the jump
1833 *adjusted_insn_addr
= buildaddr
;
1834 relocate_instruction (&buildaddr
, tpaddr
);
1835 *adjusted_insn_addr_end
= buildaddr
;
1837 /* Write the jump back to the program. */
1838 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1839 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1840 memcpy (buf
+ 1, &offset
, 4);
1841 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1843 /* The jump pad is now built. Wire in a jump to our jump pad. This
1844 is always done last (by our caller actually), so that we can
1845 install fast tracepoints with threads running. This relies on
1846 the agent's atomic write support. */
1849 /* Create a trampoline. */
1850 *trampoline_size
= sizeof (jump_insn
);
1851 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1853 /* No trampoline space available. */
1855 "E.Cannot allocate trampoline space needed for fast "
1856 "tracepoints on 4-byte instructions.");
1860 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1861 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1862 memcpy (buf
+ 1, &offset
, 4);
1863 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1865 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1866 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1867 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1868 memcpy (buf
+ 2, &offset
, 2);
1869 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1870 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1874 /* Else use a 32-bit relative jump instruction. */
1875 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1876 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1877 memcpy (buf
+ 1, &offset
, 4);
1878 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1879 *jjump_pad_insn_size
= sizeof (jump_insn
);
1882 /* Return the end address of our pad. */
1883 *jump_entry
= buildaddr
;
1889 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1890 CORE_ADDR collector
,
1893 CORE_ADDR
*jump_entry
,
1894 CORE_ADDR
*trampoline
,
1895 ULONGEST
*trampoline_size
,
1896 unsigned char *jjump_pad_insn
,
1897 ULONGEST
*jjump_pad_insn_size
,
1898 CORE_ADDR
*adjusted_insn_addr
,
1899 CORE_ADDR
*adjusted_insn_addr_end
,
1903 if (is_64bit_tdesc ())
1904 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1905 collector
, lockaddr
,
1906 orig_size
, jump_entry
,
1907 trampoline
, trampoline_size
,
1909 jjump_pad_insn_size
,
1911 adjusted_insn_addr_end
,
1915 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1916 collector
, lockaddr
,
1917 orig_size
, jump_entry
,
1918 trampoline
, trampoline_size
,
1920 jjump_pad_insn_size
,
1922 adjusted_insn_addr_end
,
1926 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1930 x86_get_min_fast_tracepoint_insn_len (void)
1932 static int warned_about_fast_tracepoints
= 0;
1935 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1936 used for fast tracepoints. */
1937 if (is_64bit_tdesc ())
1941 if (agent_loaded_p ())
1943 char errbuf
[IPA_BUFSIZ
];
1947 /* On x86, if trampolines are available, then 4-byte jump instructions
1948 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1949 with a 4-byte offset are used instead. */
1950 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1954 /* GDB has no channel to explain to user why a shorter fast
1955 tracepoint is not possible, but at least make GDBserver
1956 mention that something has gone awry. */
1957 if (!warned_about_fast_tracepoints
)
1959 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1960 warned_about_fast_tracepoints
= 1;
1967 /* Indicate that the minimum length is currently unknown since the IPA
1968 has not loaded yet. */
1974 add_insns (unsigned char *start
, int len
)
1976 CORE_ADDR buildaddr
= current_insn_ptr
;
1979 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1980 len
, paddress (buildaddr
));
1982 append_insns (&buildaddr
, len
, start
);
1983 current_insn_ptr
= buildaddr
;
1986 /* Our general strategy for emitting code is to avoid specifying raw
1987 bytes whenever possible, and instead copy a block of inline asm
1988 that is embedded in the function. This is a little messy, because
1989 we need to keep the compiler from discarding what looks like dead
1990 code, plus suppress various warnings. */
1992 #define EMIT_ASM(NAME, INSNS) \
1995 extern unsigned char start_ ## NAME, end_ ## NAME; \
1996 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1997 __asm__ ("jmp end_" #NAME "\n" \
1998 "\t" "start_" #NAME ":" \
2000 "\t" "end_" #NAME ":"); \
2005 #define EMIT_ASM32(NAME,INSNS) \
2008 extern unsigned char start_ ## NAME, end_ ## NAME; \
2009 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2010 __asm__ (".code32\n" \
2011 "\t" "jmp end_" #NAME "\n" \
2012 "\t" "start_" #NAME ":\n" \
2014 "\t" "end_" #NAME ":\n" \
2020 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2027 amd64_emit_prologue (void)
2029 EMIT_ASM (amd64_prologue
,
2031 "movq %rsp,%rbp\n\t"
2032 "sub $0x20,%rsp\n\t"
2033 "movq %rdi,-8(%rbp)\n\t"
2034 "movq %rsi,-16(%rbp)");
2039 amd64_emit_epilogue (void)
2041 EMIT_ASM (amd64_epilogue
,
2042 "movq -16(%rbp),%rdi\n\t"
2043 "movq %rax,(%rdi)\n\t"
2050 amd64_emit_add (void)
2052 EMIT_ASM (amd64_add
,
2053 "add (%rsp),%rax\n\t"
2054 "lea 0x8(%rsp),%rsp");
2058 amd64_emit_sub (void)
2060 EMIT_ASM (amd64_sub
,
2061 "sub %rax,(%rsp)\n\t"
2066 amd64_emit_mul (void)
2072 amd64_emit_lsh (void)
2078 amd64_emit_rsh_signed (void)
2084 amd64_emit_rsh_unsigned (void)
2090 amd64_emit_ext (int arg
)
2095 EMIT_ASM (amd64_ext_8
,
2101 EMIT_ASM (amd64_ext_16
,
2106 EMIT_ASM (amd64_ext_32
,
2115 amd64_emit_log_not (void)
2117 EMIT_ASM (amd64_log_not
,
2118 "test %rax,%rax\n\t"
2124 amd64_emit_bit_and (void)
2126 EMIT_ASM (amd64_and
,
2127 "and (%rsp),%rax\n\t"
2128 "lea 0x8(%rsp),%rsp");
2132 amd64_emit_bit_or (void)
2135 "or (%rsp),%rax\n\t"
2136 "lea 0x8(%rsp),%rsp");
2140 amd64_emit_bit_xor (void)
2142 EMIT_ASM (amd64_xor
,
2143 "xor (%rsp),%rax\n\t"
2144 "lea 0x8(%rsp),%rsp");
2148 amd64_emit_bit_not (void)
2150 EMIT_ASM (amd64_bit_not
,
2151 "xorq $0xffffffffffffffff,%rax");
2155 amd64_emit_equal (void)
2157 EMIT_ASM (amd64_equal
,
2158 "cmp %rax,(%rsp)\n\t"
2159 "je .Lamd64_equal_true\n\t"
2161 "jmp .Lamd64_equal_end\n\t"
2162 ".Lamd64_equal_true:\n\t"
2164 ".Lamd64_equal_end:\n\t"
2165 "lea 0x8(%rsp),%rsp");
2169 amd64_emit_less_signed (void)
2171 EMIT_ASM (amd64_less_signed
,
2172 "cmp %rax,(%rsp)\n\t"
2173 "jl .Lamd64_less_signed_true\n\t"
2175 "jmp .Lamd64_less_signed_end\n\t"
2176 ".Lamd64_less_signed_true:\n\t"
2178 ".Lamd64_less_signed_end:\n\t"
2179 "lea 0x8(%rsp),%rsp");
2183 amd64_emit_less_unsigned (void)
2185 EMIT_ASM (amd64_less_unsigned
,
2186 "cmp %rax,(%rsp)\n\t"
2187 "jb .Lamd64_less_unsigned_true\n\t"
2189 "jmp .Lamd64_less_unsigned_end\n\t"
2190 ".Lamd64_less_unsigned_true:\n\t"
2192 ".Lamd64_less_unsigned_end:\n\t"
2193 "lea 0x8(%rsp),%rsp");
2197 amd64_emit_ref (int size
)
2202 EMIT_ASM (amd64_ref1
,
2206 EMIT_ASM (amd64_ref2
,
2210 EMIT_ASM (amd64_ref4
,
2211 "movl (%rax),%eax");
2214 EMIT_ASM (amd64_ref8
,
2215 "movq (%rax),%rax");
2221 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2223 EMIT_ASM (amd64_if_goto
,
2227 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2235 amd64_emit_goto (int *offset_p
, int *size_p
)
2237 EMIT_ASM (amd64_goto
,
2238 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2246 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2248 int diff
= (to
- (from
+ size
));
2249 unsigned char buf
[sizeof (int)];
2257 memcpy (buf
, &diff
, sizeof (int));
2258 write_inferior_memory (from
, buf
, sizeof (int));
2262 amd64_emit_const (LONGEST num
)
2264 unsigned char buf
[16];
2266 CORE_ADDR buildaddr
= current_insn_ptr
;
2269 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2270 memcpy (&buf
[i
], &num
, sizeof (num
));
2272 append_insns (&buildaddr
, i
, buf
);
2273 current_insn_ptr
= buildaddr
;
2277 amd64_emit_call (CORE_ADDR fn
)
2279 unsigned char buf
[16];
2281 CORE_ADDR buildaddr
;
2284 /* The destination function being in the shared library, may be
2285 >31-bits away off the compiled code pad. */
2287 buildaddr
= current_insn_ptr
;
2289 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2293 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2295 /* Offset is too large for a call. Use callq, but that requires
2296 a register, so avoid it if possible. Use r10, since it is
2297 call-clobbered, we don't have to push/pop it. */
2298 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2300 memcpy (buf
+ i
, &fn
, 8);
2302 buf
[i
++] = 0xff; /* callq *%r10 */
2307 int offset32
= offset64
; /* we know we can't overflow here. */
2308 memcpy (buf
+ i
, &offset32
, 4);
2312 append_insns (&buildaddr
, i
, buf
);
2313 current_insn_ptr
= buildaddr
;
2317 amd64_emit_reg (int reg
)
2319 unsigned char buf
[16];
2321 CORE_ADDR buildaddr
;
2323 /* Assume raw_regs is still in %rdi. */
2324 buildaddr
= current_insn_ptr
;
2326 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2327 memcpy (&buf
[i
], ®
, sizeof (reg
));
2329 append_insns (&buildaddr
, i
, buf
);
2330 current_insn_ptr
= buildaddr
;
2331 amd64_emit_call (get_raw_reg_func_addr ());
2335 amd64_emit_pop (void)
2337 EMIT_ASM (amd64_pop
,
2342 amd64_emit_stack_flush (void)
2344 EMIT_ASM (amd64_stack_flush
,
2349 amd64_emit_zero_ext (int arg
)
2354 EMIT_ASM (amd64_zero_ext_8
,
2358 EMIT_ASM (amd64_zero_ext_16
,
2359 "and $0xffff,%rax");
2362 EMIT_ASM (amd64_zero_ext_32
,
2363 "mov $0xffffffff,%rcx\n\t"
2372 amd64_emit_swap (void)
2374 EMIT_ASM (amd64_swap
,
2381 amd64_emit_stack_adjust (int n
)
2383 unsigned char buf
[16];
2385 CORE_ADDR buildaddr
= current_insn_ptr
;
2388 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2392 /* This only handles adjustments up to 16, but we don't expect any more. */
2394 append_insns (&buildaddr
, i
, buf
);
2395 current_insn_ptr
= buildaddr
;
2398 /* FN's prototype is `LONGEST(*fn)(int)'. */
2401 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2403 unsigned char buf
[16];
2405 CORE_ADDR buildaddr
;
2407 buildaddr
= current_insn_ptr
;
2409 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2410 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2412 append_insns (&buildaddr
, i
, buf
);
2413 current_insn_ptr
= buildaddr
;
2414 amd64_emit_call (fn
);
2417 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2420 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2422 unsigned char buf
[16];
2424 CORE_ADDR buildaddr
;
2426 buildaddr
= current_insn_ptr
;
2428 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2429 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2431 append_insns (&buildaddr
, i
, buf
);
2432 current_insn_ptr
= buildaddr
;
2433 EMIT_ASM (amd64_void_call_2_a
,
2434 /* Save away a copy of the stack top. */
2436 /* Also pass top as the second argument. */
2438 amd64_emit_call (fn
);
2439 EMIT_ASM (amd64_void_call_2_b
,
2440 /* Restore the stack top, %rax may have been trashed. */
2445 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2448 "cmp %rax,(%rsp)\n\t"
2449 "jne .Lamd64_eq_fallthru\n\t"
2450 "lea 0x8(%rsp),%rsp\n\t"
2452 /* jmp, but don't trust the assembler to choose the right jump */
2453 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2454 ".Lamd64_eq_fallthru:\n\t"
2455 "lea 0x8(%rsp),%rsp\n\t"
2465 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2468 "cmp %rax,(%rsp)\n\t"
2469 "je .Lamd64_ne_fallthru\n\t"
2470 "lea 0x8(%rsp),%rsp\n\t"
2472 /* jmp, but don't trust the assembler to choose the right jump */
2473 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2474 ".Lamd64_ne_fallthru:\n\t"
2475 "lea 0x8(%rsp),%rsp\n\t"
2485 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2488 "cmp %rax,(%rsp)\n\t"
2489 "jnl .Lamd64_lt_fallthru\n\t"
2490 "lea 0x8(%rsp),%rsp\n\t"
2492 /* jmp, but don't trust the assembler to choose the right jump */
2493 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2494 ".Lamd64_lt_fallthru:\n\t"
2495 "lea 0x8(%rsp),%rsp\n\t"
2505 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2508 "cmp %rax,(%rsp)\n\t"
2509 "jnle .Lamd64_le_fallthru\n\t"
2510 "lea 0x8(%rsp),%rsp\n\t"
2512 /* jmp, but don't trust the assembler to choose the right jump */
2513 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2514 ".Lamd64_le_fallthru:\n\t"
2515 "lea 0x8(%rsp),%rsp\n\t"
2525 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2528 "cmp %rax,(%rsp)\n\t"
2529 "jng .Lamd64_gt_fallthru\n\t"
2530 "lea 0x8(%rsp),%rsp\n\t"
2532 /* jmp, but don't trust the assembler to choose the right jump */
2533 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2534 ".Lamd64_gt_fallthru:\n\t"
2535 "lea 0x8(%rsp),%rsp\n\t"
2545 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2548 "cmp %rax,(%rsp)\n\t"
2549 "jnge .Lamd64_ge_fallthru\n\t"
2550 ".Lamd64_ge_jump:\n\t"
2551 "lea 0x8(%rsp),%rsp\n\t"
2553 /* jmp, but don't trust the assembler to choose the right jump */
2554 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2555 ".Lamd64_ge_fallthru:\n\t"
2556 "lea 0x8(%rsp),%rsp\n\t"
2565 struct emit_ops amd64_emit_ops
=
2567 amd64_emit_prologue
,
2568 amd64_emit_epilogue
,
2573 amd64_emit_rsh_signed
,
2574 amd64_emit_rsh_unsigned
,
2582 amd64_emit_less_signed
,
2583 amd64_emit_less_unsigned
,
2587 amd64_write_goto_address
,
2592 amd64_emit_stack_flush
,
2593 amd64_emit_zero_ext
,
2595 amd64_emit_stack_adjust
,
2596 amd64_emit_int_call_1
,
2597 amd64_emit_void_call_2
,
2606 #endif /* __x86_64__ */
2609 i386_emit_prologue (void)
2611 EMIT_ASM32 (i386_prologue
,
2615 /* At this point, the raw regs base address is at 8(%ebp), and the
2616 value pointer is at 12(%ebp). */
2620 i386_emit_epilogue (void)
2622 EMIT_ASM32 (i386_epilogue
,
2623 "mov 12(%ebp),%ecx\n\t"
2624 "mov %eax,(%ecx)\n\t"
2625 "mov %ebx,0x4(%ecx)\n\t"
2633 i386_emit_add (void)
2635 EMIT_ASM32 (i386_add
,
2636 "add (%esp),%eax\n\t"
2637 "adc 0x4(%esp),%ebx\n\t"
2638 "lea 0x8(%esp),%esp");
2642 i386_emit_sub (void)
2644 EMIT_ASM32 (i386_sub
,
2645 "subl %eax,(%esp)\n\t"
2646 "sbbl %ebx,4(%esp)\n\t"
2652 i386_emit_mul (void)
2658 i386_emit_lsh (void)
2664 i386_emit_rsh_signed (void)
2670 i386_emit_rsh_unsigned (void)
2676 i386_emit_ext (int arg
)
2681 EMIT_ASM32 (i386_ext_8
,
2684 "movl %eax,%ebx\n\t"
2688 EMIT_ASM32 (i386_ext_16
,
2690 "movl %eax,%ebx\n\t"
2694 EMIT_ASM32 (i386_ext_32
,
2695 "movl %eax,%ebx\n\t"
2704 i386_emit_log_not (void)
2706 EMIT_ASM32 (i386_log_not
,
2708 "test %eax,%eax\n\t"
2715 i386_emit_bit_and (void)
2717 EMIT_ASM32 (i386_and
,
2718 "and (%esp),%eax\n\t"
2719 "and 0x4(%esp),%ebx\n\t"
2720 "lea 0x8(%esp),%esp");
2724 i386_emit_bit_or (void)
2726 EMIT_ASM32 (i386_or
,
2727 "or (%esp),%eax\n\t"
2728 "or 0x4(%esp),%ebx\n\t"
2729 "lea 0x8(%esp),%esp");
2733 i386_emit_bit_xor (void)
2735 EMIT_ASM32 (i386_xor
,
2736 "xor (%esp),%eax\n\t"
2737 "xor 0x4(%esp),%ebx\n\t"
2738 "lea 0x8(%esp),%esp");
2742 i386_emit_bit_not (void)
2744 EMIT_ASM32 (i386_bit_not
,
2745 "xor $0xffffffff,%eax\n\t"
2746 "xor $0xffffffff,%ebx\n\t");
2750 i386_emit_equal (void)
2752 EMIT_ASM32 (i386_equal
,
2753 "cmpl %ebx,4(%esp)\n\t"
2754 "jne .Li386_equal_false\n\t"
2755 "cmpl %eax,(%esp)\n\t"
2756 "je .Li386_equal_true\n\t"
2757 ".Li386_equal_false:\n\t"
2759 "jmp .Li386_equal_end\n\t"
2760 ".Li386_equal_true:\n\t"
2762 ".Li386_equal_end:\n\t"
2764 "lea 0x8(%esp),%esp");
2768 i386_emit_less_signed (void)
2770 EMIT_ASM32 (i386_less_signed
,
2771 "cmpl %ebx,4(%esp)\n\t"
2772 "jl .Li386_less_signed_true\n\t"
2773 "jne .Li386_less_signed_false\n\t"
2774 "cmpl %eax,(%esp)\n\t"
2775 "jl .Li386_less_signed_true\n\t"
2776 ".Li386_less_signed_false:\n\t"
2778 "jmp .Li386_less_signed_end\n\t"
2779 ".Li386_less_signed_true:\n\t"
2781 ".Li386_less_signed_end:\n\t"
2783 "lea 0x8(%esp),%esp");
2787 i386_emit_less_unsigned (void)
2789 EMIT_ASM32 (i386_less_unsigned
,
2790 "cmpl %ebx,4(%esp)\n\t"
2791 "jb .Li386_less_unsigned_true\n\t"
2792 "jne .Li386_less_unsigned_false\n\t"
2793 "cmpl %eax,(%esp)\n\t"
2794 "jb .Li386_less_unsigned_true\n\t"
2795 ".Li386_less_unsigned_false:\n\t"
2797 "jmp .Li386_less_unsigned_end\n\t"
2798 ".Li386_less_unsigned_true:\n\t"
2800 ".Li386_less_unsigned_end:\n\t"
2802 "lea 0x8(%esp),%esp");
2806 i386_emit_ref (int size
)
2811 EMIT_ASM32 (i386_ref1
,
2815 EMIT_ASM32 (i386_ref2
,
2819 EMIT_ASM32 (i386_ref4
,
2820 "movl (%eax),%eax");
2823 EMIT_ASM32 (i386_ref8
,
2824 "movl 4(%eax),%ebx\n\t"
2825 "movl (%eax),%eax");
2831 i386_emit_if_goto (int *offset_p
, int *size_p
)
2833 EMIT_ASM32 (i386_if_goto
,
2839 /* Don't trust the assembler to choose the right jump */
2840 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2843 *offset_p
= 11; /* be sure that this matches the sequence above */
2849 i386_emit_goto (int *offset_p
, int *size_p
)
2851 EMIT_ASM32 (i386_goto
,
2852 /* Don't trust the assembler to choose the right jump */
2853 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2861 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2863 int diff
= (to
- (from
+ size
));
2864 unsigned char buf
[sizeof (int)];
2866 /* We're only doing 4-byte sizes at the moment. */
2873 memcpy (buf
, &diff
, sizeof (int));
2874 write_inferior_memory (from
, buf
, sizeof (int));
2878 i386_emit_const (LONGEST num
)
2880 unsigned char buf
[16];
2882 CORE_ADDR buildaddr
= current_insn_ptr
;
2885 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2886 lo
= num
& 0xffffffff;
2887 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2889 hi
= ((num
>> 32) & 0xffffffff);
2892 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2893 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2898 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2900 append_insns (&buildaddr
, i
, buf
);
2901 current_insn_ptr
= buildaddr
;
2905 i386_emit_call (CORE_ADDR fn
)
2907 unsigned char buf
[16];
2909 CORE_ADDR buildaddr
;
2911 buildaddr
= current_insn_ptr
;
2913 buf
[i
++] = 0xe8; /* call <reladdr> */
2914 offset
= ((int) fn
) - (buildaddr
+ 5);
2915 memcpy (buf
+ 1, &offset
, 4);
2916 append_insns (&buildaddr
, 5, buf
);
2917 current_insn_ptr
= buildaddr
;
2921 i386_emit_reg (int reg
)
2923 unsigned char buf
[16];
2925 CORE_ADDR buildaddr
;
2927 EMIT_ASM32 (i386_reg_a
,
2929 buildaddr
= current_insn_ptr
;
2931 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2932 memcpy (&buf
[i
], ®
, sizeof (reg
));
2934 append_insns (&buildaddr
, i
, buf
);
2935 current_insn_ptr
= buildaddr
;
2936 EMIT_ASM32 (i386_reg_b
,
2937 "mov %eax,4(%esp)\n\t"
2938 "mov 8(%ebp),%eax\n\t"
2940 i386_emit_call (get_raw_reg_func_addr ());
2941 EMIT_ASM32 (i386_reg_c
,
2943 "lea 0x8(%esp),%esp");
2947 i386_emit_pop (void)
2949 EMIT_ASM32 (i386_pop
,
2955 i386_emit_stack_flush (void)
2957 EMIT_ASM32 (i386_stack_flush
,
2963 i386_emit_zero_ext (int arg
)
2968 EMIT_ASM32 (i386_zero_ext_8
,
2969 "and $0xff,%eax\n\t"
2973 EMIT_ASM32 (i386_zero_ext_16
,
2974 "and $0xffff,%eax\n\t"
2978 EMIT_ASM32 (i386_zero_ext_32
,
2987 i386_emit_swap (void)
2989 EMIT_ASM32 (i386_swap
,
2999 i386_emit_stack_adjust (int n
)
3001 unsigned char buf
[16];
3003 CORE_ADDR buildaddr
= current_insn_ptr
;
3006 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3010 append_insns (&buildaddr
, i
, buf
);
3011 current_insn_ptr
= buildaddr
;
3014 /* FN's prototype is `LONGEST(*fn)(int)'. */
3017 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3019 unsigned char buf
[16];
3021 CORE_ADDR buildaddr
;
3023 EMIT_ASM32 (i386_int_call_1_a
,
3024 /* Reserve a bit of stack space. */
3026 /* Put the one argument on the stack. */
3027 buildaddr
= current_insn_ptr
;
3029 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3032 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3034 append_insns (&buildaddr
, i
, buf
);
3035 current_insn_ptr
= buildaddr
;
3036 i386_emit_call (fn
);
3037 EMIT_ASM32 (i386_int_call_1_c
,
3039 "lea 0x8(%esp),%esp");
3042 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3045 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3047 unsigned char buf
[16];
3049 CORE_ADDR buildaddr
;
3051 EMIT_ASM32 (i386_void_call_2_a
,
3052 /* Preserve %eax only; we don't have to worry about %ebx. */
3054 /* Reserve a bit of stack space for arguments. */
3055 "sub $0x10,%esp\n\t"
3056 /* Copy "top" to the second argument position. (Note that
3057 we can't assume function won't scribble on its
3058 arguments, so don't try to restore from this.) */
3059 "mov %eax,4(%esp)\n\t"
3060 "mov %ebx,8(%esp)");
3061 /* Put the first argument on the stack. */
3062 buildaddr
= current_insn_ptr
;
3064 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3067 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3069 append_insns (&buildaddr
, i
, buf
);
3070 current_insn_ptr
= buildaddr
;
3071 i386_emit_call (fn
);
3072 EMIT_ASM32 (i386_void_call_2_b
,
3073 "lea 0x10(%esp),%esp\n\t"
3074 /* Restore original stack top. */
3080 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3083 /* Check low half first, more likely to be decider */
3084 "cmpl %eax,(%esp)\n\t"
3085 "jne .Leq_fallthru\n\t"
3086 "cmpl %ebx,4(%esp)\n\t"
3087 "jne .Leq_fallthru\n\t"
3088 "lea 0x8(%esp),%esp\n\t"
3091 /* jmp, but don't trust the assembler to choose the right jump */
3092 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3093 ".Leq_fallthru:\n\t"
3094 "lea 0x8(%esp),%esp\n\t"
3105 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3108 /* Check low half first, more likely to be decider */
3109 "cmpl %eax,(%esp)\n\t"
3111 "cmpl %ebx,4(%esp)\n\t"
3112 "je .Lne_fallthru\n\t"
3114 "lea 0x8(%esp),%esp\n\t"
3117 /* jmp, but don't trust the assembler to choose the right jump */
3118 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3119 ".Lne_fallthru:\n\t"
3120 "lea 0x8(%esp),%esp\n\t"
3131 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3134 "cmpl %ebx,4(%esp)\n\t"
3136 "jne .Llt_fallthru\n\t"
3137 "cmpl %eax,(%esp)\n\t"
3138 "jnl .Llt_fallthru\n\t"
3140 "lea 0x8(%esp),%esp\n\t"
3143 /* jmp, but don't trust the assembler to choose the right jump */
3144 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3145 ".Llt_fallthru:\n\t"
3146 "lea 0x8(%esp),%esp\n\t"
3157 i386_emit_le_goto (int *offset_p
, int *size_p
)
3160 "cmpl %ebx,4(%esp)\n\t"
3162 "jne .Lle_fallthru\n\t"
3163 "cmpl %eax,(%esp)\n\t"
3164 "jnle .Lle_fallthru\n\t"
3166 "lea 0x8(%esp),%esp\n\t"
3169 /* jmp, but don't trust the assembler to choose the right jump */
3170 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3171 ".Lle_fallthru:\n\t"
3172 "lea 0x8(%esp),%esp\n\t"
3183 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3186 "cmpl %ebx,4(%esp)\n\t"
3188 "jne .Lgt_fallthru\n\t"
3189 "cmpl %eax,(%esp)\n\t"
3190 "jng .Lgt_fallthru\n\t"
3192 "lea 0x8(%esp),%esp\n\t"
3195 /* jmp, but don't trust the assembler to choose the right jump */
3196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3197 ".Lgt_fallthru:\n\t"
3198 "lea 0x8(%esp),%esp\n\t"
/* Emit i386 (32-bit) code for a 64-bit signed "greater or equal"
   conditional goto.  High halves compared first; on equality the low
   half decides, with "jnge" taking the fallthrough.  NOTE(review):
   jump-taken label and pops elided from this listing; OFFSET_P/SIZE_P
   presumably locate the patchable jmp displacement.  */
3209 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3212 "cmpl %ebx,4(%esp)\n\t" /* signed compare of the high halves first */
3214 "jne .Lge_fallthru\n\t"
3215 "cmpl %eax,(%esp)\n\t" /* high halves equal: low half decides */
3216 "jnge .Lge_fallthru\n\t"
3218 "lea 0x8(%esp),%esp\n\t" /* discard the 8-byte stack comparand */
3221 /* jmp, but don't trust the assembler to choose the right jump */
3222 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3223 ".Lge_fallthru:\n\t"
3224 "lea 0x8(%esp),%esp\n\t" /* fallthrough path also pops the comparand */
/* Table of i386 (32-bit) code-generation callbacks used by the
   bytecode-to-native compiler for fast tracepoint conditions.
   NOTE(review): this listing is partial -- many initializer entries
   between the ones shown are elided; the entries must stay in
   struct emit_ops field order.  */
3234 struct emit_ops i386_emit_ops
=
3242 i386_emit_rsh_signed
, /* arithmetic right shift */
3243 i386_emit_rsh_unsigned
, /* logical right shift */
3251 i386_emit_less_signed
,
3252 i386_emit_less_unsigned
,
3256 i386_write_goto_address
, /* patches the jmp displacements the *_goto emitters leave zeroed */
3261 i386_emit_stack_flush
,
3264 i386_emit_stack_adjust
,
3265 i386_emit_int_call_1
,
3266 i386_emit_void_call_2
,
/* Select the emit_ops table matching the inferior's architecture:
   the amd64 table when the current target description is 64-bit,
   the i386 table otherwise.  NOTE(review): the function-name line of
   this definition is elided in this listing.  */
3276 static struct emit_ops
*
3280 if (is_64bit_tdesc ())
3281 return &amd64_emit_ops
;
3284 return &i386_emit_ops
;
3288 x86_supports_range_stepping (void)
3293 /* This is initialized assuming an amd64 target.
3294 x86_arch_setup will correct it for i386 or amd64 targets. */
/* The x86-specific hooks plugged into the generic linux-low target.
   NOTE(review): this listing is partial -- several initializer
   entries between the ones shown are elided; entries must stay in
   struct linux_target_ops field order.  */
3296 struct linux_target_ops the_low_target
=
3299 x86_linux_regs_info
,
3300 x86_cannot_fetch_register
,
3301 x86_cannot_store_register
,
3302 NULL
, /* fetch_register */
3312 x86_stopped_by_watchpoint
,
3313 x86_stopped_data_address
,
3314 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3315 native i386 case (no registers smaller than an xfer unit), and are not
3316 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3319 /* need to fix up i386 siginfo if host is amd64 */
3321 x86_linux_new_process
,
3322 x86_linux_new_thread
,
3323 x86_linux_prepare_to_resume
,
3324 x86_linux_process_qsupported
,
3325 x86_supports_tracepoints
,
3326 x86_get_thread_area
,
3327 x86_install_fast_tracepoint_jump_pad
,
3329 x86_get_min_fast_tracepoint_insn_len
,
3330 x86_supports_range_stepping
,
/* One-time per-architecture setup: register all amd64/x32/i386 target
   descriptions, build the "no XML" variants served to clients that
   cannot read XML target descriptions, and initialize the regsets
   info.  NOTE(review): the xmalloc results are used unchecked --
   presumably gdb's xmalloc aborts on allocation failure; confirm.
   The function's closing lines are past the end of this listing.  */
3334 initialize_low_arch (void)
3336 /* Initialize the Linux target descriptions. */
3338 init_registers_amd64_linux ();
3339 init_registers_amd64_avx_linux ();
3340 init_registers_x32_linux ();
3341 init_registers_x32_avx_linux ();
/* Clone the amd64 description and tag it with the canned non-XML
   register layout.  */
3343 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3344 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3345 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3347 init_registers_i386_linux ();
3348 init_registers_i386_mmx_linux ();
3349 init_registers_i386_avx_linux ();
/* Same non-XML fallback for the i386 description.  */
3351 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3352 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3353 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3355 initialize_regsets_info (&x86_regsets_info
);