1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
33 /* Defined in auto-generated file i386-linux.c. */
34 void init_registers_i386_linux (void);
35 /* Defined in auto-generated file amd64-linux.c. */
36 void init_registers_amd64_linux (void);
37 /* Defined in auto-generated file i386-avx-linux.c. */
38 void init_registers_i386_avx_linux (void);
39 /* Defined in auto-generated file amd64-avx-linux.c. */
40 void init_registers_amd64_avx_linux (void);
41 /* Defined in auto-generated file i386-mmx-linux.c. */
42 void init_registers_i386_mmx_linux (void);
/* 5-byte template for an x86 relative jump: 0xe9 is the "jmp rel32"
   opcode, followed by four placeholder bytes for the 32-bit signed
   displacement.  The jump-pad builders below copy this template and
   then memcpy the computed offset into bytes 1..4 (see the
   "memcpy (buf + 1, &offset, 4)" sites) before installing it.  */
44 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
46 /* Backward compatibility for gdb without XML support. */
48 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
49 <architecture>i386</architecture>\
50 <osabi>GNU/Linux</osabi>\
54 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
55 <architecture>i386:x86-64</architecture>\
56 <osabi>GNU/Linux</osabi>\
61 #include <sys/procfs.h>
62 #include <sys/ptrace.h>
65 #ifndef PTRACE_GETREGSET
66 #define PTRACE_GETREGSET 0x4204
69 #ifndef PTRACE_SETREGSET
70 #define PTRACE_SETREGSET 0x4205
74 #ifndef PTRACE_GET_THREAD_AREA
75 #define PTRACE_GET_THREAD_AREA 25
78 /* This definition comes from prctl.h, but some kernels may not have it. */
79 #ifndef PTRACE_ARCH_PRCTL
80 #define PTRACE_ARCH_PRCTL 30
83 /* The following definitions come from prctl.h, but may be absent
84 for certain configurations. */
86 #define ARCH_SET_GS 0x1001
87 #define ARCH_SET_FS 0x1002
88 #define ARCH_GET_FS 0x1003
89 #define ARCH_GET_GS 0x1004
92 /* Per-process arch-specific data we want to keep. */
94 struct arch_process_info
96 struct i386_debug_reg_state debug_reg_state
;
99 /* Per-thread arch-specific data we want to keep. */
103 /* Non-zero if our copy differs from what's recorded in the thread. */
104 int debug_registers_changed
;
109 /* Mapping between the general-purpose registers in `struct user'
110 format and GDB's register array layout.
111 Note that the transfer layout uses 64-bit regs. */
112 static /*const*/ int i386_regmap
[] =
114 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
115 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
116 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
117 DS
* 8, ES
* 8, FS
* 8, GS
* 8
120 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
122 /* So code below doesn't have to care, i386 or amd64. */
123 #define ORIG_EAX ORIG_RAX
125 static const int x86_64_regmap
[] =
127 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
128 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
129 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
130 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
131 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
132 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1, -1,
140 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
142 #else /* ! __x86_64__ */
144 /* Mapping between the general-purpose registers in `struct user'
145 format and GDB's register array layout. */
146 static /*const*/ int i386_regmap
[] =
148 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
149 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
150 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
151 DS
* 4, ES
* 4, FS
* 4, GS
* 4
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
158 /* Called by libthread_db. */
161 ps_get_thread_area (const struct ps_prochandle
*ph
,
162 lwpid_t lwpid
, int idx
, void **base
)
165 int use_64bit
= register_size (0) == 8;
172 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
176 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
187 unsigned int desc
[4];
189 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
190 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
193 *(int *)base
= desc
[1];
198 /* Get the thread area address. This is used to recognize which
199 thread is which when tracing with the in-process agent library. We
200 don't read anything from the address, and treat it as opaque; it's
201 the address itself that we assume is unique per-thread. */
204 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
207 int use_64bit
= register_size (0) == 8;
212 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
214 *addr
= (CORE_ADDR
) (uintptr_t) base
;
223 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
224 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
225 unsigned int desc
[4];
227 const int reg_thread_area
= 3; /* bits to scale down register value. */
230 collect_register_by_name (regcache
, "gs", &gs
);
232 idx
= gs
>> reg_thread_area
;
234 if (ptrace (PTRACE_GET_THREAD_AREA
,
235 lwpid_of (lwp
), (void *) (long) idx
, (unsigned long) &desc
) < 0)
246 i386_cannot_store_register (int regno
)
248 return regno
>= I386_NUM_REGS
;
252 i386_cannot_fetch_register (int regno
)
254 return regno
>= I386_NUM_REGS
;
258 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
263 if (register_size (0) == 8)
265 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
266 if (x86_64_regmap
[i
] != -1)
267 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
272 for (i
= 0; i
< I386_NUM_REGS
; i
++)
273 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
275 collect_register_by_name (regcache
, "orig_eax",
276 ((char *) buf
) + ORIG_EAX
* 4);
280 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
285 if (register_size (0) == 8)
287 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
288 if (x86_64_regmap
[i
] != -1)
289 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
294 for (i
= 0; i
< I386_NUM_REGS
; i
++)
295 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
297 supply_register_by_name (regcache
, "orig_eax",
298 ((char *) buf
) + ORIG_EAX
* 4);
302 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
305 i387_cache_to_fxsave (regcache
, buf
);
307 i387_cache_to_fsave (regcache
, buf
);
312 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
315 i387_fxsave_to_cache (regcache
, buf
);
317 i387_fsave_to_cache (regcache
, buf
);
324 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
326 i387_cache_to_fxsave (regcache
, buf
);
330 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
332 i387_fxsave_to_cache (regcache
, buf
);
338 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
340 i387_cache_to_xsave (regcache
, buf
);
344 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
346 i387_xsave_to_cache (regcache
, buf
);
349 /* ??? The non-biarch i386 case stores all the i387 regs twice.
350 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
351 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
352 doesn't work. IWBN to avoid the duplication in the case where it
353 does work. Maybe the arch_setup routine could check whether it works
354 and update target_regsets accordingly, maybe by moving target_regsets
355 to linux_target_ops and set the right one there, rather than having to
356 modify the target_regsets global. */
358 struct regset_info target_regsets
[] =
360 #ifdef HAVE_PTRACE_GETREGS
361 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
363 x86_fill_gregset
, x86_store_gregset
},
364 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
365 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
367 # ifdef HAVE_PTRACE_GETFPXREGS
368 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
370 x86_fill_fpxregset
, x86_store_fpxregset
},
373 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
375 x86_fill_fpregset
, x86_store_fpregset
},
376 #endif /* HAVE_PTRACE_GETREGS */
377 { 0, 0, 0, -1, -1, NULL
, NULL
}
381 x86_get_pc (struct regcache
*regcache
)
383 int use_64bit
= register_size (0) == 8;
388 collect_register_by_name (regcache
, "rip", &pc
);
389 return (CORE_ADDR
) pc
;
394 collect_register_by_name (regcache
, "eip", &pc
);
395 return (CORE_ADDR
) pc
;
400 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
402 int use_64bit
= register_size (0) == 8;
406 unsigned long newpc
= pc
;
407 supply_register_by_name (regcache
, "rip", &newpc
);
411 unsigned int newpc
= pc
;
412 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction for both i386 and amd64:
   0xCC is the single-byte "int3" trap opcode, so one definition and
   one length serve both architectures.  x86_breakpoint_at compares a
   single byte read from the inferior against this.  */
416 static const unsigned char x86_breakpoint
[] = { 0xCC };
417 #define x86_breakpoint_len 1
420 x86_breakpoint_at (CORE_ADDR pc
)
424 (*the_target
->read_memory
) (pc
, &c
, 1);
431 /* Support for debug registers. */
434 x86_linux_dr_get (ptid_t ptid
, int regnum
)
439 tid
= ptid_get_lwp (ptid
);
442 value
= ptrace (PTRACE_PEEKUSER
, tid
,
443 offsetof (struct user
, u_debugreg
[regnum
]), 0);
445 error ("Couldn't read debug register");
451 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
455 tid
= ptid_get_lwp (ptid
);
458 ptrace (PTRACE_POKEUSER
, tid
,
459 offsetof (struct user
, u_debugreg
[regnum
]), value
);
461 error ("Couldn't write debug register");
465 update_debug_registers_callback (struct inferior_list_entry
*entry
,
468 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
469 int pid
= *(int *) pid_p
;
471 /* Only update the threads of this process. */
472 if (pid_of (lwp
) == pid
)
474 /* The actual update is done later just before resuming the lwp,
475 we just mark that the registers need updating. */
476 lwp
->arch_private
->debug_registers_changed
= 1;
478 /* If the lwp isn't stopped, force it to momentarily pause, so
479 we can update its debug registers. */
481 linux_stop_lwp (lwp
);
487 /* Update the inferior's debug register REGNUM from STATE. */
490 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
492 /* Only update the threads of this process. */
493 int pid
= pid_of (get_thread_lwp (current_inferior
));
495 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
496 fatal ("Invalid debug register %d", regnum
);
498 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
501 /* Return the inferior's debug register REGNUM. */
504 i386_dr_low_get_addr (int regnum
)
506 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
507 ptid_t ptid
= ptid_of (lwp
);
509 /* DR6 and DR7 are retrieved with some other way. */
510 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
< DR_LASTADDR
);
512 return x86_linux_dr_get (ptid
, regnum
);
515 /* Update the inferior's DR7 debug control register from STATE. */
518 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
520 /* Only update the threads of this process. */
521 int pid
= pid_of (get_thread_lwp (current_inferior
));
523 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
526 /* Return the inferior's DR7 debug control register. */
529 i386_dr_low_get_control (void)
531 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
532 ptid_t ptid
= ptid_of (lwp
);
534 return x86_linux_dr_get (ptid
, DR_CONTROL
);
537 /* Get the value of the DR6 debug status register from the inferior
538 and record it in STATE. */
541 i386_dr_low_get_status (void)
543 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
544 ptid_t ptid
= ptid_of (lwp
);
546 return x86_linux_dr_get (ptid
, DR_STATUS
);
549 /* Breakpoint/Watchpoint support. */
552 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
554 struct process_info
*proc
= current_process ();
561 ret
= prepare_to_access_memory ();
564 ret
= set_gdb_breakpoint_at (addr
);
565 done_accessing_memory ();
571 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
580 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
582 struct process_info
*proc
= current_process ();
589 ret
= prepare_to_access_memory ();
592 ret
= delete_gdb_breakpoint_at (addr
);
593 done_accessing_memory ();
599 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
608 x86_stopped_by_watchpoint (void)
610 struct process_info
*proc
= current_process ();
611 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
615 x86_stopped_data_address (void)
617 struct process_info
*proc
= current_process ();
619 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
625 /* Called when a new process is created. */
627 static struct arch_process_info
*
628 x86_linux_new_process (void)
630 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
632 i386_low_init_dregs (&info
->debug_reg_state
);
637 /* Called when a new thread is detected. */
639 static struct arch_lwp_info
*
640 x86_linux_new_thread (void)
642 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
644 info
->debug_registers_changed
= 1;
649 /* Called when resuming a thread.
650 If the debug regs have changed, update the thread's copies. */
653 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
655 ptid_t ptid
= ptid_of (lwp
);
657 if (lwp
->arch_private
->debug_registers_changed
)
660 int pid
= ptid_get_pid (ptid
);
661 struct process_info
*proc
= find_process_pid (pid
);
662 struct i386_debug_reg_state
*state
= &proc
->private->arch_private
->debug_reg_state
;
664 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
665 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
667 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
669 lwp
->arch_private
->debug_registers_changed
= 0;
672 if (lwp
->stopped_by_watchpoint
)
673 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
676 /* When GDBSERVER is built as a 64-bit application on linux, the
677 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
678 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
679 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
680 conversion in-place ourselves. */
682 /* These types below (compat_*) define a siginfo type that is layout
683 compatible with the siginfo type exported by the 32-bit userspace
688 typedef int compat_int_t
;
689 typedef unsigned int compat_uptr_t
;
691 typedef int compat_time_t
;
692 typedef int compat_timer_t
;
693 typedef int compat_clock_t
;
695 struct compat_timeval
697 compat_time_t tv_sec
;
701 typedef union compat_sigval
703 compat_int_t sival_int
;
704 compat_uptr_t sival_ptr
;
707 typedef struct compat_siginfo
715 int _pad
[((128 / sizeof (int)) - 3)];
724 /* POSIX.1b timers */
729 compat_sigval_t _sigval
;
732 /* POSIX.1b signals */
737 compat_sigval_t _sigval
;
746 compat_clock_t _utime
;
747 compat_clock_t _stime
;
750 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
765 #define cpt_si_pid _sifields._kill._pid
766 #define cpt_si_uid _sifields._kill._uid
767 #define cpt_si_timerid _sifields._timer._tid
768 #define cpt_si_overrun _sifields._timer._overrun
769 #define cpt_si_status _sifields._sigchld._status
770 #define cpt_si_utime _sifields._sigchld._utime
771 #define cpt_si_stime _sifields._sigchld._stime
772 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
773 #define cpt_si_addr _sifields._sigfault._addr
774 #define cpt_si_band _sifields._sigpoll._band
775 #define cpt_si_fd _sifields._sigpoll._fd
777 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
778 In their place is si_timer1,si_timer2. */
780 #define si_timerid si_timer1
783 #define si_overrun si_timer2
787 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
789 memset (to
, 0, sizeof (*to
));
791 to
->si_signo
= from
->si_signo
;
792 to
->si_errno
= from
->si_errno
;
793 to
->si_code
= from
->si_code
;
795 if (to
->si_code
== SI_TIMER
)
797 to
->cpt_si_timerid
= from
->si_timerid
;
798 to
->cpt_si_overrun
= from
->si_overrun
;
799 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
801 else if (to
->si_code
== SI_USER
)
803 to
->cpt_si_pid
= from
->si_pid
;
804 to
->cpt_si_uid
= from
->si_uid
;
806 else if (to
->si_code
< 0)
808 to
->cpt_si_pid
= from
->si_pid
;
809 to
->cpt_si_uid
= from
->si_uid
;
810 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
814 switch (to
->si_signo
)
817 to
->cpt_si_pid
= from
->si_pid
;
818 to
->cpt_si_uid
= from
->si_uid
;
819 to
->cpt_si_status
= from
->si_status
;
820 to
->cpt_si_utime
= from
->si_utime
;
821 to
->cpt_si_stime
= from
->si_stime
;
827 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
830 to
->cpt_si_band
= from
->si_band
;
831 to
->cpt_si_fd
= from
->si_fd
;
834 to
->cpt_si_pid
= from
->si_pid
;
835 to
->cpt_si_uid
= from
->si_uid
;
836 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
843 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
845 memset (to
, 0, sizeof (*to
));
847 to
->si_signo
= from
->si_signo
;
848 to
->si_errno
= from
->si_errno
;
849 to
->si_code
= from
->si_code
;
851 if (to
->si_code
== SI_TIMER
)
853 to
->si_timerid
= from
->cpt_si_timerid
;
854 to
->si_overrun
= from
->cpt_si_overrun
;
855 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
857 else if (to
->si_code
== SI_USER
)
859 to
->si_pid
= from
->cpt_si_pid
;
860 to
->si_uid
= from
->cpt_si_uid
;
862 else if (to
->si_code
< 0)
864 to
->si_pid
= from
->cpt_si_pid
;
865 to
->si_uid
= from
->cpt_si_uid
;
866 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
870 switch (to
->si_signo
)
873 to
->si_pid
= from
->cpt_si_pid
;
874 to
->si_uid
= from
->cpt_si_uid
;
875 to
->si_status
= from
->cpt_si_status
;
876 to
->si_utime
= from
->cpt_si_utime
;
877 to
->si_stime
= from
->cpt_si_stime
;
883 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
886 to
->si_band
= from
->cpt_si_band
;
887 to
->si_fd
= from
->cpt_si_fd
;
890 to
->si_pid
= from
->cpt_si_pid
;
891 to
->si_uid
= from
->cpt_si_uid
;
892 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
898 #endif /* __x86_64__ */
900 /* Convert a native/host siginfo object, into/from the siginfo in the
901 layout of the inferiors' architecture. Returns true if any
902 conversion was done; false otherwise. If DIRECTION is 1, then copy
903 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
907 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
910 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
911 if (register_size (0) == 4)
913 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
914 fatal ("unexpected difference in siginfo");
917 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
919 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
930 /* Update gdbserver_xmltarget. */
933 x86_linux_update_xmltarget (void)
936 struct regset_info
*regset
;
937 static unsigned long long xcr0
;
938 static int have_ptrace_getregset
= -1;
939 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
940 static int have_ptrace_getfpxregs
= -1;
943 if (!current_inferior
)
946 /* Before changing the register cache internal layout or the target
947 regsets, flush the contents of the current valid caches back to
949 regcache_invalidate ();
951 pid
= pid_of (get_thread_lwp (current_inferior
));
953 if (num_xmm_registers
== 8)
954 init_registers_i386_linux ();
956 init_registers_amd64_linux ();
959 # ifdef HAVE_PTRACE_GETFPXREGS
960 if (have_ptrace_getfpxregs
== -1)
962 elf_fpxregset_t fpxregs
;
964 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
966 have_ptrace_getfpxregs
= 0;
967 x86_xcr0
= I386_XSTATE_X87_MASK
;
969 /* Disable PTRACE_GETFPXREGS. */
970 for (regset
= target_regsets
;
971 regset
->fill_function
!= NULL
; regset
++)
972 if (regset
->get_request
== PTRACE_GETFPXREGS
)
979 have_ptrace_getfpxregs
= 1;
982 if (!have_ptrace_getfpxregs
)
984 init_registers_i386_mmx_linux ();
988 init_registers_i386_linux ();
996 if (num_xmm_registers
== 8)
997 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
999 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1001 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1004 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1009 /* Check if XSAVE extended state is supported. */
1010 if (have_ptrace_getregset
== -1)
1012 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1015 iov
.iov_base
= xstateregs
;
1016 iov
.iov_len
= sizeof (xstateregs
);
1018 /* Check if PTRACE_GETREGSET works. */
1019 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1022 have_ptrace_getregset
= 0;
1026 have_ptrace_getregset
= 1;
1028 /* Get XCR0 from XSAVE extended state at byte 464. */
1029 xcr0
= xstateregs
[464 / sizeof (long long)];
1031 /* Use PTRACE_GETREGSET if it is available. */
1032 for (regset
= target_regsets
;
1033 regset
->fill_function
!= NULL
; regset
++)
1034 if (regset
->get_request
== PTRACE_GETREGSET
)
1035 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1036 else if (regset
->type
!= GENERAL_REGS
)
1040 if (have_ptrace_getregset
)
1042 /* AVX is the highest feature we support. */
1043 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1048 /* I386 has 8 xmm regs. */
1049 if (num_xmm_registers
== 8)
1050 init_registers_i386_avx_linux ();
1052 init_registers_amd64_avx_linux ();
1054 init_registers_i386_avx_linux ();
1060 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1061 PTRACE_GETREGSET. */
1064 x86_linux_process_qsupported (const char *query
)
1066 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1067 with "i386" in qSupported query, it supports x86 XML target
1070 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1072 char *copy
= xstrdup (query
+ 13);
1075 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1077 if (strcmp (p
, "i386") == 0)
1087 x86_linux_update_xmltarget ();
1090 /* Initialize gdbserver for the architecture of the inferior. */
1093 x86_arch_setup (void)
1096 int pid
= pid_of (get_thread_lwp (current_inferior
));
1097 char *file
= linux_child_pid_to_exec_file (pid
);
1098 int use_64bit
= elf_64_file_p (file
);
1104 /* This can only happen if /proc/<pid>/exe is unreadable,
1105 but "that can't happen" if we've gotten this far.
1106 Fall through and assume this is a 32-bit program. */
1110 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1111 the_low_target
.num_regs
= -1;
1112 the_low_target
.regmap
= NULL
;
1113 the_low_target
.cannot_fetch_register
= NULL
;
1114 the_low_target
.cannot_store_register
= NULL
;
1116 /* Amd64 has 16 xmm regs. */
1117 num_xmm_registers
= 16;
1119 x86_linux_update_xmltarget ();
1124 /* Ok we have a 32-bit inferior. */
1126 the_low_target
.num_regs
= I386_NUM_REGS
;
1127 the_low_target
.regmap
= i386_regmap
;
1128 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1129 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1131 /* I386 has 8 xmm regs. */
1132 num_xmm_registers
= 8;
1134 x86_linux_update_xmltarget ();
1138 x86_supports_tracepoints (void)
1144 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1146 write_inferior_memory (*to
, buf
, len
);
1151 push_opcode (unsigned char *buf
, char *op
)
1153 unsigned char *buf_org
= buf
;
1158 unsigned long ul
= strtoul (op
, &endptr
, 16);
1167 return buf
- buf_org
;
1172 /* Build a jump pad that saves registers and calls a collection
1173 function. Writes a jump instruction to the jump pad to
1174 JJUMPAD_INSN. The caller is responsible to write it in at the
1175 tracepoint address. */
1178 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1179 CORE_ADDR collector
,
1182 CORE_ADDR
*jump_entry
,
1183 unsigned char *jjump_pad_insn
,
1184 ULONGEST
*jjump_pad_insn_size
,
1185 CORE_ADDR
*adjusted_insn_addr
,
1186 CORE_ADDR
*adjusted_insn_addr_end
)
1188 unsigned char buf
[40];
1190 CORE_ADDR buildaddr
= *jump_entry
;
1192 /* Build the jump pad. */
1194 /* First, do tracepoint data collection. Save registers. */
1196 /* Need to ensure stack pointer saved first. */
1197 buf
[i
++] = 0x54; /* push %rsp */
1198 buf
[i
++] = 0x55; /* push %rbp */
1199 buf
[i
++] = 0x57; /* push %rdi */
1200 buf
[i
++] = 0x56; /* push %rsi */
1201 buf
[i
++] = 0x52; /* push %rdx */
1202 buf
[i
++] = 0x51; /* push %rcx */
1203 buf
[i
++] = 0x53; /* push %rbx */
1204 buf
[i
++] = 0x50; /* push %rax */
1205 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1206 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1207 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1208 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1209 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1210 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1211 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1212 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1213 buf
[i
++] = 0x9c; /* pushfq */
1214 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1216 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1217 i
+= sizeof (unsigned long);
1218 buf
[i
++] = 0x57; /* push %rdi */
1219 append_insns (&buildaddr
, i
, buf
);
1221 /* Stack space for the collecting_t object. */
1223 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1224 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1225 memcpy (buf
+ i
, &tpoint
, 8);
1227 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1228 i
+= push_opcode (&buf
[i
],
1229 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1230 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1231 append_insns (&buildaddr
, i
, buf
);
1235 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1236 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1238 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1239 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1240 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1241 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1242 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1243 append_insns (&buildaddr
, i
, buf
);
1245 /* Set up the gdb_collect call. */
1246 /* At this point, (stack pointer + 0x18) is the base of our saved
1250 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1251 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1253 /* tpoint address may be 64-bit wide. */
1254 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1255 memcpy (buf
+ i
, &tpoint
, 8);
1257 append_insns (&buildaddr
, i
, buf
);
1259 /* The collector function being in the shared library, may be
1260 >31-bits away off the jump pad. */
1262 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1263 memcpy (buf
+ i
, &collector
, 8);
1265 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1266 append_insns (&buildaddr
, i
, buf
);
1268 /* Clear the spin-lock. */
1270 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1271 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1272 memcpy (buf
+ i
, &lockaddr
, 8);
1274 append_insns (&buildaddr
, i
, buf
);
1276 /* Remove stack that had been used for the collect_t object. */
1278 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1279 append_insns (&buildaddr
, i
, buf
);
1281 /* Restore register state. */
1283 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1287 buf
[i
++] = 0x9d; /* popfq */
1288 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1289 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1290 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1291 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1292 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1293 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1294 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1295 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1296 buf
[i
++] = 0x58; /* pop %rax */
1297 buf
[i
++] = 0x5b; /* pop %rbx */
1298 buf
[i
++] = 0x59; /* pop %rcx */
1299 buf
[i
++] = 0x5a; /* pop %rdx */
1300 buf
[i
++] = 0x5e; /* pop %rsi */
1301 buf
[i
++] = 0x5f; /* pop %rdi */
1302 buf
[i
++] = 0x5d; /* pop %rbp */
1303 buf
[i
++] = 0x5c; /* pop %rsp */
1304 append_insns (&buildaddr
, i
, buf
);
1306 /* Now, adjust the original instruction to execute in the jump
1308 *adjusted_insn_addr
= buildaddr
;
1309 relocate_instruction (&buildaddr
, tpaddr
);
1310 *adjusted_insn_addr_end
= buildaddr
;
1312 /* Finally, write a jump back to the program. */
1313 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1314 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1315 memcpy (buf
+ 1, &offset
, 4);
1316 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1318 /* The jump pad is now built. Wire in a jump to our jump pad. This
1319 is always done last (by our caller actually), so that we can
1320 install fast tracepoints with threads running. This relies on
1321 the agent's atomic write support. */
1322 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1323 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1324 memcpy (buf
+ 1, &offset
, 4);
1325 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1326 *jjump_pad_insn_size
= sizeof (jump_insn
);
1328 /* Return the end address of our pad. */
1329 *jump_entry
= buildaddr
;
1334 #endif /* __x86_64__ */
1336 /* Build a jump pad that saves registers and calls a collection
1337 function. Writes a jump instruction to the jump pad to
/* Build a jump pad at *JUMP_ENTRY for a fast tracepoint placed at
   TPADDR.  The pad: saves all registers and flags, takes the spin-lock
   at LOCKADDR, calls COLLECTOR with the tracepoint object TPOINT and
   the saved-register block, releases the lock, restores the registers,
   re-executes the ORIG_SIZE bytes of original instruction(s) relocated
   into the pad, and jumps back to the instruction following them.
   *JUMP_ENTRY is advanced past the end of the pad on return.  The
   jump-to-pad instruction is returned in JJUMP_PAD_INSN, of size
   *JJUMP_PAD_INSN_SIZE; the caller is responsible to write it in at
   the tracepoint address.  [*ADJUSTED_INSN_ADDR,
   *ADJUSTED_INSN_ADDR_END) delimits the relocated original
   instruction inside the pad.  Returns 0 on success.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  First word: the tracepoint object address.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  /* Second word: this thread's identifier, from the thread area.  */
  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call: stash the address of
     the saved-register block (just above the collecting_t object)
     below the stack pointer, where the argument area will land.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* Reserve room for the two arguments.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  /* First argument: the tracepoint object address.  */
  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore segment registers and flags, in reverse push order.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* the_low_target entry point: dispatch to the amd64 or i386 jump pad
   builder depending on the inferior's register size (8 bytes means a
   64-bit inferior).  See i386_install_fast_tracepoint_jump_pad for
   the meaning of the parameters.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end);
}
/* Append LEN bytes of instructions from START to the compiled-code
   buffer at current_insn_ptr, and advance current_insn_ptr past
   them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1536 /* Our general strategy for emitting code is to avoid specifying raw
1537 bytes whenever possible, and instead copy a block of inline asm
1538 that is embedded in the function. This is a little messy, because
1539 we need to keep the compiler from discarding what looks like dead
1540 code, plus suppress various warnings. */
1542 #define EMIT_ASM(NAME, INSNS) \
1545 extern unsigned char start_ ## NAME, end_ ## NAME; \
1546 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1547 __asm__ ("jmp end_" #NAME "\n" \
1548 "\t" "start_" #NAME ":" \
1550 "\t" "end_" #NAME ":"); \
1555 #define EMIT_ASM32(NAME,INSNS) \
1558 extern unsigned char start_ ## NAME, end_ ## NAME; \
1559 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1560 __asm__ (".code32\n" \
1561 "\t" "jmp end_" #NAME "\n" \
1562 "\t" "start_" #NAME ":\n" \
1564 "\t" "end_" #NAME ":\n" \
1570 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
/* Emit the compiled expression's prologue: set up a frame and stash
   the two incoming arguments (%rdi and %rsi, per the SysV amd64 ABI)
   into it; the epilogue and amd64_emit_reg read them back from
   -8(%rbp)/-16(%rbp).  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
/* Emit the epilogue: store the computed value (top of stack, cached
   in %rax) through the result pointer saved at -16(%rbp) by the
   prologue, return 0, and tear down the frame.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
/* Stack machine ADD: fold the next-on-stack operand into the cached
   top-of-stack (%rax) and pop it.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Stack machine SUB: subtract the top (%rax) from the next-on-stack
   value; the difference becomes the new cached top.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
1616 amd64_emit_mul (void)
1622 amd64_emit_lsh (void)
1628 amd64_emit_rsh_signed (void)
1634 amd64_emit_rsh_unsigned (void)
1640 amd64_emit_ext (int arg
)
1645 EMIT_ASM (amd64_ext_8
,
1651 EMIT_ASM (amd64_ext_16
,
1656 EMIT_ASM (amd64_ext_32
,
/* Logical NOT of the top of stack: %rax <- (%rax == 0 ? 1 : 0).
   NOTE(review): the sete/movzbq tail was lost in extraction and is
   reconstructed from upstream GDB -- verify against history.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}
/* Stack machine bitwise AND: combine next-on-stack into %rax, pop.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Stack machine bitwise OR: combine next-on-stack into %rax, pop.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Stack machine bitwise XOR: combine next-on-stack into %rax, pop.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Stack machine bitwise NOT: complement the cached top of stack
   (xor with all-ones == one's complement).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
/* Stack machine EQUAL: %rax <- (next-on-stack == %rax ? 1 : 0); pop
   the next-on-stack operand.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Stack machine signed LESS: %rax <- (next-on-stack < %rax, signed
   compare, ? 1 : 0); pop the next-on-stack operand.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
/* Stack machine unsigned LESS: %rax <- (next-on-stack < %rax,
   unsigned compare, ? 1 : 0); pop the next-on-stack operand.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1747 amd64_emit_ref (int size
)
1752 EMIT_ASM (amd64_ref1
,
1756 EMIT_ASM (amd64_ref2
,
1760 EMIT_ASM (amd64_ref4
,
1761 "movl (%rax),%eax");
1764 EMIT_ASM (amd64_ref8
,
1765 "movq (%rax),%rax");
1771 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1773 EMIT_ASM (amd64_if_goto
,
1777 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1785 amd64_emit_goto (int *offset_p
, int *size_p
)
1787 EMIT_ASM (amd64_goto
,
1788 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch the relative displacement of the SIZE-byte jump previously
   emitted at FROM so that it lands on TO.  Only 4-byte displacements
   are handled; anything else flags an emit error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit code to load the 64-bit constant NUM into the cached
   top-of-stack register %rax (movabs $NUM,%rax).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  *((LONGEST *) (&buf[i])) = num;
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1827 amd64_emit_call (CORE_ADDR fn
)
1829 unsigned char buf
[16];
1831 CORE_ADDR buildaddr
;
1834 /* The destination function being in the shared library, may be
1835 >31-bits away off the compiled code pad. */
1837 buildaddr
= current_insn_ptr
;
1839 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1843 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1845 /* Offset is too large for a call. Use callq, but that requires
1846 a register, so avoid it if possible. Use r10, since it is
1847 call-clobbered, we don't have to push/pop it. */
1848 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1850 memcpy (buf
+ i
, &fn
, 8);
1852 buf
[i
++] = 0xff; /* callq *%r10 */
1857 int offset32
= offset64
; /* we know we can't overflow here. */
1858 memcpy (buf
+ i
, &offset32
, 4);
1862 append_insns (&buildaddr
, i
, buf
);
1863 current_insn_ptr
= buildaddr
;
/* Emit code to fetch raw register number REG onto the stack machine:
   load REG into %esi (the second argument register) and call the
   raw-register accessor.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
1885 amd64_emit_pop (void)
1887 EMIT_ASM (amd64_pop
,
1892 amd64_emit_stack_flush (void)
1894 EMIT_ASM (amd64_stack_flush
,
1899 amd64_emit_zero_ext (int arg
)
1904 EMIT_ASM (amd64_zero_ext_8
,
1908 EMIT_ASM (amd64_zero_ext_16
,
1909 "and $0xffff,%rax");
1912 EMIT_ASM (amd64_zero_ext_32
,
1913 "mov $0xffffffff,%rcx\n\t"
1922 amd64_emit_swap (void)
1924 EMIT_ASM (amd64_swap
,
1931 amd64_emit_stack_adjust (int n
)
1933 unsigned char buf
[16];
1935 CORE_ADDR buildaddr
= current_insn_ptr
;
1938 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
1942 /* This only handles adjustments up to 16, but we don't expect any more. */
1944 append_insns (&buildaddr
, i
, buf
);
1945 current_insn_ptr
= buildaddr
;
1948 /* FN's prototype is `LONGEST(*fn)(int)'. */
1951 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
1953 unsigned char buf
[16];
1955 CORE_ADDR buildaddr
;
1957 buildaddr
= current_insn_ptr
;
1959 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1960 *((int *) (&buf
[i
])) = arg1
;
1962 append_insns (&buildaddr
, i
, buf
);
1963 current_insn_ptr
= buildaddr
;
1964 amd64_emit_call (fn
);
1967 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1970 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
1972 unsigned char buf
[16];
1974 CORE_ADDR buildaddr
;
1976 buildaddr
= current_insn_ptr
;
1978 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
1979 *((int *) (&buf
[i
])) = arg1
;
1981 append_insns (&buildaddr
, i
, buf
);
1982 current_insn_ptr
= buildaddr
;
1983 EMIT_ASM (amd64_void_call_2_a
,
1984 /* Save away a copy of the stack top. */
1986 /* Also pass top as the second argument. */
1988 amd64_emit_call (fn
);
1989 EMIT_ASM (amd64_void_call_2_b
,
1990 /* Restore the stack top, %rax may have been trashed. */
1994 struct emit_ops amd64_emit_ops
=
1996 amd64_emit_prologue
,
1997 amd64_emit_epilogue
,
2002 amd64_emit_rsh_signed
,
2003 amd64_emit_rsh_unsigned
,
2011 amd64_emit_less_signed
,
2012 amd64_emit_less_unsigned
,
2016 amd64_write_goto_address
,
2021 amd64_emit_stack_flush
,
2022 amd64_emit_zero_ext
,
2024 amd64_emit_stack_adjust
,
2025 amd64_emit_int_call_1
,
2026 amd64_emit_void_call_2
2029 #endif /* __x86_64__ */
2032 i386_emit_prologue (void)
2034 EMIT_ASM32 (i386_prologue
,
2037 /* At this point, the raw regs base address is at 8(%ebp), and the
2038 value pointer is at 12(%ebp). */
2042 i386_emit_epilogue (void)
2044 EMIT_ASM32 (i386_epilogue
,
2045 "mov 12(%ebp),%ecx\n\t"
2046 "mov %eax,(%ecx)\n\t"
2047 "mov %ebx,0x4(%ecx)\n\t"
/* Stack machine ADD (32-bit variant): the 64-bit top of stack lives
   split in %eax (low word) / %ebx (high word); add the next-on-stack
   pair with carry propagation and pop it.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* Stack machine SUB (32-bit variant): subtract the %eax/%ebx top from
   the next-on-stack pair with borrow; the difference becomes the new
   top.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2073 i386_emit_mul (void)
2079 i386_emit_lsh (void)
2085 i386_emit_rsh_signed (void)
2091 i386_emit_rsh_unsigned (void)
2097 i386_emit_ext (int arg
)
2102 EMIT_ASM32 (i386_ext_8
,
2105 "movl %eax,%ebx\n\t"
2109 EMIT_ASM32 (i386_ext_16
,
2111 "movl %eax,%ebx\n\t"
2115 EMIT_ASM32 (i386_ext_32
,
2116 "movl %eax,%ebx\n\t"
2125 i386_emit_log_not (void)
2127 EMIT_ASM32 (i386_log_not
,
2129 "test %eax,%eax\n\t"
/* Stack machine bitwise AND (32-bit variant): fold the next-on-stack
   64-bit pair into %eax/%ebx and pop it.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* Stack machine bitwise OR (32-bit variant): fold the next-on-stack
   64-bit pair into %eax/%ebx and pop it.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* Stack machine bitwise XOR (32-bit variant): fold the next-on-stack
   64-bit pair into %eax/%ebx and pop it.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* Stack machine bitwise NOT (32-bit variant): complement both halves
   of the %eax/%ebx top of stack.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
/* Stack machine EQUAL (32-bit variant): compare the next-on-stack
   64-bit pair against %eax/%ebx half by half; result (0/1) goes in
   %eax with %ebx cleared; pop the operand.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* Stack machine signed LESS (32-bit variant): 64-bit signed compare
   of next-on-stack against %eax/%ebx done as a signed compare of the
   high words followed by a compare of the low words.
   NOTE(review): the low-word compare uses "jl" (signed) -- a 64-bit
   compare conventionally needs an unsigned "jb" on the low half;
   verify against upstream before relying on results whose low word
   has the top bit set.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
/* Stack machine unsigned LESS (32-bit variant): 64-bit unsigned
   compare of next-on-stack against %eax/%ebx, high word first, then
   low word; result (0/1) in %eax, %ebx cleared; pop the operand.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2227 i386_emit_ref (int size
)
2232 EMIT_ASM32 (i386_ref1
,
2236 EMIT_ASM32 (i386_ref2
,
2240 EMIT_ASM32 (i386_ref4
,
2241 "movl (%eax),%eax");
2244 EMIT_ASM32 (i386_ref8
,
2245 "movl 4(%eax),%ebx\n\t"
2246 "movl (%eax),%eax");
2252 i386_emit_if_goto (int *offset_p
, int *size_p
)
2254 EMIT_ASM32 (i386_if_goto
,
2260 /* Don't trust the assembler to choose the right jump */
2261 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2264 *offset_p
= 11; /* be sure that this matches the sequence above */
2270 i386_emit_goto (int *offset_p
, int *size_p
)
2272 EMIT_ASM32 (i386_goto
,
2273 /* Don't trust the assembler to choose the right jump */
2274 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch the relative displacement of the SIZE-byte jump previously
   emitted at FROM so that it lands on TO.  Only 4-byte displacements
   are handled; anything else flags an emit error.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
/* Emit code to load the 64-bit constant NUM into the split
   top-of-stack pair: low word into %eax, high word into %ebx (using
   a cheap xor when the high word is zero).  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = (num & 0xffffffff);
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      *((int *) (&buf[i])) = hi;
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* Emit a 32-bit relative call to FN at the current instruction
   pointer (on i386 every destination is reachable with rel32).  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  /* Displacement is relative to the end of the 5-byte call insn.  */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
2341 i386_emit_reg (int reg
)
2343 unsigned char buf
[16];
2345 CORE_ADDR buildaddr
;
2347 EMIT_ASM32 (i386_reg_a
,
2349 buildaddr
= current_insn_ptr
;
2351 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2352 *((int *) (&buf
[i
])) = reg
;
2354 append_insns (&buildaddr
, i
, buf
);
2355 current_insn_ptr
= buildaddr
;
2356 EMIT_ASM32 (i386_reg_b
,
2357 "mov %eax,4(%esp)\n\t"
2358 "mov 8(%ebp),%eax\n\t"
2360 i386_emit_call (get_raw_reg_func_addr ());
2361 EMIT_ASM32 (i386_reg_c
,
2363 "lea 0x8(%esp),%esp");
2367 i386_emit_pop (void)
2369 EMIT_ASM32 (i386_pop
,
2375 i386_emit_stack_flush (void)
2377 EMIT_ASM32 (i386_stack_flush
,
2383 i386_emit_zero_ext (int arg
)
2388 EMIT_ASM32 (i386_zero_ext_8
,
2389 "and $0xff,%eax\n\t"
2393 EMIT_ASM32 (i386_zero_ext_16
,
2394 "and $0xffff,%eax\n\t"
2398 EMIT_ASM32 (i386_zero_ext_32
,
2407 i386_emit_swap (void)
2409 EMIT_ASM32 (i386_swap
,
2419 i386_emit_stack_adjust (int n
)
2421 unsigned char buf
[16];
2423 CORE_ADDR buildaddr
= current_insn_ptr
;
2426 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2430 append_insns (&buildaddr
, i
, buf
);
2431 current_insn_ptr
= buildaddr
;
2434 /* FN's prototype is `LONGEST(*fn)(int)'. */
2437 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2439 unsigned char buf
[16];
2441 CORE_ADDR buildaddr
;
2443 EMIT_ASM32 (i386_int_call_1_a
,
2444 /* Reserve a bit of stack space. */
2446 /* Put the one argument on the stack. */
2447 buildaddr
= current_insn_ptr
;
2449 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2452 *((int *) (&buf
[i
])) = arg1
;
2454 append_insns (&buildaddr
, i
, buf
);
2455 current_insn_ptr
= buildaddr
;
2456 i386_emit_call (fn
);
2457 EMIT_ASM32 (i386_int_call_1_c
,
2459 "lea 0x8(%esp),%esp");
2462 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2465 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2467 unsigned char buf
[16];
2469 CORE_ADDR buildaddr
;
2471 EMIT_ASM32 (i386_void_call_2_a
,
2472 /* Preserve %eax only; we don't have to worry about %ebx. */
2474 /* Reserve a bit of stack space for arguments. */
2475 "sub $0x10,%esp\n\t"
2476 /* Copy "top" to the second argument position. (Note that
2477 we can't assume function won't scribble on its
2478 arguments, so don't try to restore from this.) */
2479 "mov %eax,4(%esp)\n\t"
2480 "mov %ebx,8(%esp)");
2481 /* Put the first argument on the stack. */
2482 buildaddr
= current_insn_ptr
;
2484 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2487 *((int *) (&buf
[i
])) = arg1
;
2489 append_insns (&buildaddr
, i
, buf
);
2490 current_insn_ptr
= buildaddr
;
2491 i386_emit_call (fn
);
2492 EMIT_ASM32 (i386_void_call_2_b
,
2493 "lea 0x10(%esp),%esp\n\t"
2494 /* Restore original stack top. */
2498 struct emit_ops i386_emit_ops
=
2506 i386_emit_rsh_signed
,
2507 i386_emit_rsh_unsigned
,
2515 i386_emit_less_signed
,
2516 i386_emit_less_unsigned
,
2520 i386_write_goto_address
,
2525 i386_emit_stack_flush
,
2528 i386_emit_stack_adjust
,
2529 i386_emit_int_call_1
,
2530 i386_emit_void_call_2
2534 static struct emit_ops
*
2538 int use_64bit
= register_size (0) == 8;
2541 return &amd64_emit_ops
;
2544 return &i386_emit_ops
;
2547 /* This is initialized assuming an amd64 target.
2548 x86_arch_setup will correct it for i386 or amd64 targets. */
2550 struct linux_target_ops the_low_target
=
2566 x86_stopped_by_watchpoint
,
2567 x86_stopped_data_address
,
2568 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2569 native i386 case (no registers smaller than an xfer unit), and are not
2570 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2573 /* need to fix up i386 siginfo if host is amd64 */
2575 x86_linux_new_process
,
2576 x86_linux_new_thread
,
2577 x86_linux_prepare_to_resume
,
2578 x86_linux_process_qsupported
,
2579 x86_supports_tracepoints
,
2580 x86_get_thread_area
,
2581 x86_install_fast_tracepoint_jump_pad
,