1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
33 /* Defined in auto-generated file i386-linux.c. */
34 void init_registers_i386_linux (void);
35 /* Defined in auto-generated file amd64-linux.c. */
36 void init_registers_amd64_linux (void);
37 /* Defined in auto-generated file i386-avx-linux.c. */
38 void init_registers_i386_avx_linux (void);
39 /* Defined in auto-generated file amd64-avx-linux.c. */
40 void init_registers_amd64_avx_linux (void);
41 /* Defined in auto-generated file i386-mmx-linux.c. */
42 void init_registers_i386_mmx_linux (void);
/* 5-byte x86 relative jump (0xe9 + rel32); the 4 displacement bytes
   are patched in before the instruction is written out.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* 4-byte jump with a 16-bit displacement (operand-size prefix 0x66 +
   0xe9 + rel16).  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  Minimal target
   descriptions handed to such a gdb; the leading '@' marks the string
   as a literal description rather than an annex name.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
62 #include <sys/procfs.h>
63 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests that older system headers
   may lack; values match the Linux kernel's.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
93 /* Per-process arch-specific data we want to keep. */
95 struct arch_process_info
97 struct i386_debug_reg_state debug_reg_state
;
100 /* Per-thread arch-specific data we want to keep. */
104 /* Non-zero if our copy differs from what's recorded in the thread. */
105 int debug_registers_changed
;
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
/* Mapping between GDB's 64-bit register array layout and `struct
   user' offsets.  Entries of -1 mark registers that are not present
   in the gregset buffer and are skipped by x86_fill_gregset /
   x86_store_gregset.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
143 #else /* ! __x86_64__ */
145 /* Mapping between the general-purpose registers in `struct user'
146 format and GDB's register array layout. */
147 static /*const*/ int i386_regmap
[] =
149 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
150 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
151 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
152 DS
* 4, ES
* 4, FS
* 4, GS
* 4
155 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
159 /* Called by libthread_db. */
162 ps_get_thread_area (const struct ps_prochandle
*ph
,
163 lwpid_t lwpid
, int idx
, void **base
)
166 int use_64bit
= register_size (0) == 8;
173 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
177 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
188 unsigned int desc
[4];
190 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
191 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
194 *(int *)base
= desc
[1];
199 /* Get the thread area address. This is used to recognize which
200 thread is which when tracing with the in-process agent library. We
201 don't read anything from the address, and treat it as opaque; it's
202 the address itself that we assume is unique per-thread. */
205 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
208 int use_64bit
= register_size (0) == 8;
213 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
215 *addr
= (CORE_ADDR
) (uintptr_t) base
;
224 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
225 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
226 unsigned int desc
[4];
228 const int reg_thread_area
= 3; /* bits to scale down register value. */
231 collect_register_by_name (regcache
, "gs", &gs
);
233 idx
= gs
>> reg_thread_area
;
235 if (ptrace (PTRACE_GET_THREAD_AREA
,
237 (void *) (long) idx
, (unsigned long) &desc
) < 0)
248 i386_cannot_store_register (int regno
)
250 return regno
>= I386_NUM_REGS
;
254 i386_cannot_fetch_register (int regno
)
256 return regno
>= I386_NUM_REGS
;
260 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
265 if (register_size (0) == 8)
267 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
268 if (x86_64_regmap
[i
] != -1)
269 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
274 for (i
= 0; i
< I386_NUM_REGS
; i
++)
275 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
277 collect_register_by_name (regcache
, "orig_eax",
278 ((char *) buf
) + ORIG_EAX
* 4);
282 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
287 if (register_size (0) == 8)
289 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
290 if (x86_64_regmap
[i
] != -1)
291 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
296 for (i
= 0; i
< I386_NUM_REGS
; i
++)
297 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
299 supply_register_by_name (regcache
, "orig_eax",
300 ((char *) buf
) + ORIG_EAX
* 4);
/* Fill the FP register buffer BUF from REGCACHE: fxsave layout on
   amd64, legacy fsave layout on i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Supply the FP registers in BUF to REGCACHE: fxsave layout on amd64,
   legacy fsave layout on i386.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Fill the extended (fxsave) FP register buffer BUF from REGCACHE
   (i386-only; amd64 uses the plain fpregset for fxsave).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Supply the extended (fxsave) FP registers in BUF to REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
/* Fill the xsave extended-state buffer BUF from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Supply the xsave extended-state registers in BUF to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
351 /* ??? The non-biarch i386 case stores all the i387 regs twice.
352 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
353 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
354 doesn't work. IWBN to avoid the duplication in the case where it
355 does work. Maybe the arch_setup routine could check whether it works
356 and update target_regsets accordingly, maybe by moving target_regsets
357 to linux_target_ops and set the right one there, rather than having to
358 modify the target_regsets global. */
360 struct regset_info target_regsets
[] =
362 #ifdef HAVE_PTRACE_GETREGS
363 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
365 x86_fill_gregset
, x86_store_gregset
},
366 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
367 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
369 # ifdef HAVE_PTRACE_GETFPXREGS
370 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
372 x86_fill_fpxregset
, x86_store_fpxregset
},
375 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
377 x86_fill_fpregset
, x86_store_fpregset
},
378 #endif /* HAVE_PTRACE_GETREGS */
379 { 0, 0, 0, -1, -1, NULL
, NULL
}
383 x86_get_pc (struct regcache
*regcache
)
385 int use_64bit
= register_size (0) == 8;
390 collect_register_by_name (regcache
, "rip", &pc
);
391 return (CORE_ADDR
) pc
;
396 collect_register_by_name (regcache
, "eip", &pc
);
397 return (CORE_ADDR
) pc
;
402 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
404 int use_64bit
= register_size (0) == 8;
408 unsigned long newpc
= pc
;
409 supply_register_by_name (regcache
, "rip", &newpc
);
413 unsigned int newpc
= pc
;
414 supply_register_by_name (regcache
, "eip", &newpc
);
/* The single-byte int3 software breakpoint instruction (0xCC).  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
422 x86_breakpoint_at (CORE_ADDR pc
)
426 (*the_target
->read_memory
) (pc
, &c
, 1);
433 /* Support for debug registers. */
436 x86_linux_dr_get (ptid_t ptid
, int regnum
)
441 tid
= ptid_get_lwp (ptid
);
444 value
= ptrace (PTRACE_PEEKUSER
, tid
,
445 offsetof (struct user
, u_debugreg
[regnum
]), 0);
447 error ("Couldn't read debug register");
453 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
457 tid
= ptid_get_lwp (ptid
);
460 ptrace (PTRACE_POKEUSER
, tid
,
461 offsetof (struct user
, u_debugreg
[regnum
]), value
);
463 error ("Couldn't write debug register");
467 update_debug_registers_callback (struct inferior_list_entry
*entry
,
470 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
471 int pid
= *(int *) pid_p
;
473 /* Only update the threads of this process. */
474 if (pid_of (lwp
) == pid
)
476 /* The actual update is done later just before resuming the lwp,
477 we just mark that the registers need updating. */
478 lwp
->arch_private
->debug_registers_changed
= 1;
480 /* If the lwp isn't stopped, force it to momentarily pause, so
481 we can update its debug registers. */
483 linux_stop_lwp (lwp
);
489 /* Update the inferior's debug register REGNUM from STATE. */
492 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
494 /* Only update the threads of this process. */
495 int pid
= pid_of (get_thread_lwp (current_inferior
));
497 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
498 fatal ("Invalid debug register %d", regnum
);
500 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
503 /* Return the inferior's debug register REGNUM. */
506 i386_dr_low_get_addr (int regnum
)
508 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
509 ptid_t ptid
= ptid_of (lwp
);
511 /* DR6 and DR7 are retrieved with some other way. */
512 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
514 return x86_linux_dr_get (ptid
, regnum
);
517 /* Update the inferior's DR7 debug control register from STATE. */
520 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
522 /* Only update the threads of this process. */
523 int pid
= pid_of (get_thread_lwp (current_inferior
));
525 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
528 /* Return the inferior's DR7 debug control register. */
531 i386_dr_low_get_control (void)
533 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
534 ptid_t ptid
= ptid_of (lwp
);
536 return x86_linux_dr_get (ptid
, DR_CONTROL
);
539 /* Get the value of the DR6 debug status register from the inferior
540 and record it in STATE. */
543 i386_dr_low_get_status (void)
545 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
546 ptid_t ptid
= ptid_of (lwp
);
548 return x86_linux_dr_get (ptid
, DR_STATUS
);
551 /* Breakpoint/Watchpoint support. */
554 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
556 struct process_info
*proc
= current_process ();
563 ret
= prepare_to_access_memory ();
566 ret
= set_gdb_breakpoint_at (addr
);
567 done_accessing_memory ();
573 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
582 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
584 struct process_info
*proc
= current_process ();
591 ret
= prepare_to_access_memory ();
594 ret
= delete_gdb_breakpoint_at (addr
);
595 done_accessing_memory ();
601 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
610 x86_stopped_by_watchpoint (void)
612 struct process_info
*proc
= current_process ();
613 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
617 x86_stopped_data_address (void)
619 struct process_info
*proc
= current_process ();
621 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
627 /* Called when a new process is created. */
629 static struct arch_process_info
*
630 x86_linux_new_process (void)
632 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
634 i386_low_init_dregs (&info
->debug_reg_state
);
639 /* Called when a new thread is detected. */
641 static struct arch_lwp_info
*
642 x86_linux_new_thread (void)
644 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
646 info
->debug_registers_changed
= 1;
651 /* Called when resuming a thread.
652 If the debug regs have changed, update the thread's copies. */
655 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
657 ptid_t ptid
= ptid_of (lwp
);
658 int clear_status
= 0;
660 if (lwp
->arch_private
->debug_registers_changed
)
663 int pid
= ptid_get_pid (ptid
);
664 struct process_info
*proc
= find_process_pid (pid
);
665 struct i386_debug_reg_state
*state
666 = &proc
->private->arch_private
->debug_reg_state
;
668 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
669 if (state
->dr_ref_count
[i
] > 0)
671 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
673 /* If we're setting a watchpoint, any change the inferior
674 had done itself to the debug registers needs to be
675 discarded, otherwise, i386_low_stopped_data_address can
680 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
682 lwp
->arch_private
->debug_registers_changed
= 0;
685 if (clear_status
|| lwp
->stopped_by_watchpoint
)
686 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
689 /* When GDBSERVER is built as a 64-bit application on linux, the
690 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
691 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
692 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
693 conversion in-place ourselves. */
695 /* These types below (compat_*) define a siginfo type that is layout
696 compatible with the siginfo type exported by the 32-bit userspace
701 typedef int compat_int_t
;
702 typedef unsigned int compat_uptr_t
;
704 typedef int compat_time_t
;
705 typedef int compat_timer_t
;
706 typedef int compat_clock_t
;
708 struct compat_timeval
710 compat_time_t tv_sec
;
714 typedef union compat_sigval
716 compat_int_t sival_int
;
717 compat_uptr_t sival_ptr
;
720 typedef struct compat_siginfo
728 int _pad
[((128 / sizeof (int)) - 3)];
737 /* POSIX.1b timers */
742 compat_sigval_t _sigval
;
745 /* POSIX.1b signals */
750 compat_sigval_t _sigval
;
759 compat_clock_t _utime
;
760 compat_clock_t _stime
;
763 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
778 #define cpt_si_pid _sifields._kill._pid
779 #define cpt_si_uid _sifields._kill._uid
780 #define cpt_si_timerid _sifields._timer._tid
781 #define cpt_si_overrun _sifields._timer._overrun
782 #define cpt_si_status _sifields._sigchld._status
783 #define cpt_si_utime _sifields._sigchld._utime
784 #define cpt_si_stime _sifields._sigchld._stime
785 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
786 #define cpt_si_addr _sifields._sigfault._addr
787 #define cpt_si_band _sifields._sigpoll._band
788 #define cpt_si_fd _sifields._sigpoll._fd
790 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
791 In their place is si_timer1,si_timer2. */
793 #define si_timerid si_timer1
796 #define si_overrun si_timer2
800 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
802 memset (to
, 0, sizeof (*to
));
804 to
->si_signo
= from
->si_signo
;
805 to
->si_errno
= from
->si_errno
;
806 to
->si_code
= from
->si_code
;
808 if (to
->si_code
== SI_TIMER
)
810 to
->cpt_si_timerid
= from
->si_timerid
;
811 to
->cpt_si_overrun
= from
->si_overrun
;
812 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
814 else if (to
->si_code
== SI_USER
)
816 to
->cpt_si_pid
= from
->si_pid
;
817 to
->cpt_si_uid
= from
->si_uid
;
819 else if (to
->si_code
< 0)
821 to
->cpt_si_pid
= from
->si_pid
;
822 to
->cpt_si_uid
= from
->si_uid
;
823 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
827 switch (to
->si_signo
)
830 to
->cpt_si_pid
= from
->si_pid
;
831 to
->cpt_si_uid
= from
->si_uid
;
832 to
->cpt_si_status
= from
->si_status
;
833 to
->cpt_si_utime
= from
->si_utime
;
834 to
->cpt_si_stime
= from
->si_stime
;
840 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
843 to
->cpt_si_band
= from
->si_band
;
844 to
->cpt_si_fd
= from
->si_fd
;
847 to
->cpt_si_pid
= from
->si_pid
;
848 to
->cpt_si_uid
= from
->si_uid
;
849 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
856 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
858 memset (to
, 0, sizeof (*to
));
860 to
->si_signo
= from
->si_signo
;
861 to
->si_errno
= from
->si_errno
;
862 to
->si_code
= from
->si_code
;
864 if (to
->si_code
== SI_TIMER
)
866 to
->si_timerid
= from
->cpt_si_timerid
;
867 to
->si_overrun
= from
->cpt_si_overrun
;
868 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
870 else if (to
->si_code
== SI_USER
)
872 to
->si_pid
= from
->cpt_si_pid
;
873 to
->si_uid
= from
->cpt_si_uid
;
875 else if (to
->si_code
< 0)
877 to
->si_pid
= from
->cpt_si_pid
;
878 to
->si_uid
= from
->cpt_si_uid
;
879 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
883 switch (to
->si_signo
)
886 to
->si_pid
= from
->cpt_si_pid
;
887 to
->si_uid
= from
->cpt_si_uid
;
888 to
->si_status
= from
->cpt_si_status
;
889 to
->si_utime
= from
->cpt_si_utime
;
890 to
->si_stime
= from
->cpt_si_stime
;
896 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
899 to
->si_band
= from
->cpt_si_band
;
900 to
->si_fd
= from
->cpt_si_fd
;
903 to
->si_pid
= from
->cpt_si_pid
;
904 to
->si_uid
= from
->cpt_si_uid
;
905 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
911 #endif /* __x86_64__ */
913 /* Convert a native/host siginfo object, into/from the siginfo in the
914 layout of the inferiors' architecture. Returns true if any
915 conversion was done; false otherwise. If DIRECTION is 1, then copy
916 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
920 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
923 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
924 if (register_size (0) == 4)
926 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
927 fatal ("unexpected difference in siginfo");
930 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
932 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
943 /* Update gdbserver_xmltarget. */
946 x86_linux_update_xmltarget (void)
949 struct regset_info
*regset
;
950 static unsigned long long xcr0
;
951 static int have_ptrace_getregset
= -1;
952 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
953 static int have_ptrace_getfpxregs
= -1;
956 if (!current_inferior
)
959 /* Before changing the register cache internal layout or the target
960 regsets, flush the contents of the current valid caches back to
962 regcache_invalidate ();
964 pid
= pid_of (get_thread_lwp (current_inferior
));
966 if (num_xmm_registers
== 8)
967 init_registers_i386_linux ();
969 init_registers_amd64_linux ();
972 # ifdef HAVE_PTRACE_GETFPXREGS
973 if (have_ptrace_getfpxregs
== -1)
975 elf_fpxregset_t fpxregs
;
977 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
979 have_ptrace_getfpxregs
= 0;
980 x86_xcr0
= I386_XSTATE_X87_MASK
;
982 /* Disable PTRACE_GETFPXREGS. */
983 for (regset
= target_regsets
;
984 regset
->fill_function
!= NULL
; regset
++)
985 if (regset
->get_request
== PTRACE_GETFPXREGS
)
992 have_ptrace_getfpxregs
= 1;
995 if (!have_ptrace_getfpxregs
)
997 init_registers_i386_mmx_linux ();
1001 init_registers_i386_linux ();
1007 /* Don't use XML. */
1009 if (num_xmm_registers
== 8)
1010 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1012 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1014 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1017 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1022 /* Check if XSAVE extended state is supported. */
1023 if (have_ptrace_getregset
== -1)
1025 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1028 iov
.iov_base
= xstateregs
;
1029 iov
.iov_len
= sizeof (xstateregs
);
1031 /* Check if PTRACE_GETREGSET works. */
1032 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1035 have_ptrace_getregset
= 0;
1039 have_ptrace_getregset
= 1;
1041 /* Get XCR0 from XSAVE extended state at byte 464. */
1042 xcr0
= xstateregs
[464 / sizeof (long long)];
1044 /* Use PTRACE_GETREGSET if it is available. */
1045 for (regset
= target_regsets
;
1046 regset
->fill_function
!= NULL
; regset
++)
1047 if (regset
->get_request
== PTRACE_GETREGSET
)
1048 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1049 else if (regset
->type
!= GENERAL_REGS
)
1053 if (have_ptrace_getregset
)
1055 /* AVX is the highest feature we support. */
1056 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1061 /* I386 has 8 xmm regs. */
1062 if (num_xmm_registers
== 8)
1063 init_registers_i386_avx_linux ();
1065 init_registers_amd64_avx_linux ();
1067 init_registers_i386_avx_linux ();
1073 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1074 PTRACE_GETREGSET. */
1077 x86_linux_process_qsupported (const char *query
)
1079 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1080 with "i386" in qSupported query, it supports x86 XML target
1083 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1085 char *copy
= xstrdup (query
+ 13);
1088 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1090 if (strcmp (p
, "i386") == 0)
1100 x86_linux_update_xmltarget ();
1103 /* Initialize gdbserver for the architecture of the inferior. */
1106 x86_arch_setup (void)
1109 int pid
= pid_of (get_thread_lwp (current_inferior
));
1110 char *file
= linux_child_pid_to_exec_file (pid
);
1111 int use_64bit
= elf_64_file_p (file
);
1117 /* This can only happen if /proc/<pid>/exe is unreadable,
1118 but "that can't happen" if we've gotten this far.
1119 Fall through and assume this is a 32-bit program. */
1123 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1124 the_low_target
.num_regs
= -1;
1125 the_low_target
.regmap
= NULL
;
1126 the_low_target
.cannot_fetch_register
= NULL
;
1127 the_low_target
.cannot_store_register
= NULL
;
1129 /* Amd64 has 16 xmm regs. */
1130 num_xmm_registers
= 16;
1132 x86_linux_update_xmltarget ();
1137 /* Ok we have a 32-bit inferior. */
1139 the_low_target
.num_regs
= I386_NUM_REGS
;
1140 the_low_target
.regmap
= i386_regmap
;
1141 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1142 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1144 /* I386 has 8 xmm regs. */
1145 num_xmm_registers
= 8;
1147 x86_linux_update_xmltarget ();
/* Tracepoints are supported on this target (jump-pad installers are
   provided below).  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1157 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1159 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hex byte values
   (e.g. "48 89 e6"), storing each byte into BUF.  Returns the number
   of bytes written.  Stops at the first token strtoul cannot
   parse.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No progress means no more hex tokens.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1185 /* Build a jump pad that saves registers and calls a collection
1186 function. Writes a jump instruction to the jump pad to
1187 JJUMPAD_INSN. The caller is responsible to write it in at the
1188 tracepoint address. */
1191 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1192 CORE_ADDR collector
,
1195 CORE_ADDR
*jump_entry
,
1196 CORE_ADDR
*trampoline
,
1197 ULONGEST
*trampoline_size
,
1198 unsigned char *jjump_pad_insn
,
1199 ULONGEST
*jjump_pad_insn_size
,
1200 CORE_ADDR
*adjusted_insn_addr
,
1201 CORE_ADDR
*adjusted_insn_addr_end
,
1204 unsigned char buf
[40];
1206 CORE_ADDR buildaddr
= *jump_entry
;
1208 /* Build the jump pad. */
1210 /* First, do tracepoint data collection. Save registers. */
1212 /* Need to ensure stack pointer saved first. */
1213 buf
[i
++] = 0x54; /* push %rsp */
1214 buf
[i
++] = 0x55; /* push %rbp */
1215 buf
[i
++] = 0x57; /* push %rdi */
1216 buf
[i
++] = 0x56; /* push %rsi */
1217 buf
[i
++] = 0x52; /* push %rdx */
1218 buf
[i
++] = 0x51; /* push %rcx */
1219 buf
[i
++] = 0x53; /* push %rbx */
1220 buf
[i
++] = 0x50; /* push %rax */
1221 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1222 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1223 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1224 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1225 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1226 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1227 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1228 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1229 buf
[i
++] = 0x9c; /* pushfq */
1230 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1232 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1233 i
+= sizeof (unsigned long);
1234 buf
[i
++] = 0x57; /* push %rdi */
1235 append_insns (&buildaddr
, i
, buf
);
1237 /* Stack space for the collecting_t object. */
1239 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1240 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1241 memcpy (buf
+ i
, &tpoint
, 8);
1243 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1244 i
+= push_opcode (&buf
[i
],
1245 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1246 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1247 append_insns (&buildaddr
, i
, buf
);
1251 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1252 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1254 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1255 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1256 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1257 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1258 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1259 append_insns (&buildaddr
, i
, buf
);
1261 /* Set up the gdb_collect call. */
1262 /* At this point, (stack pointer + 0x18) is the base of our saved
1266 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1267 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1269 /* tpoint address may be 64-bit wide. */
1270 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1271 memcpy (buf
+ i
, &tpoint
, 8);
1273 append_insns (&buildaddr
, i
, buf
);
1275 /* The collector function being in the shared library, may be
1276 >31-bits away off the jump pad. */
1278 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1279 memcpy (buf
+ i
, &collector
, 8);
1281 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1282 append_insns (&buildaddr
, i
, buf
);
1284 /* Clear the spin-lock. */
1286 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1287 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1288 memcpy (buf
+ i
, &lockaddr
, 8);
1290 append_insns (&buildaddr
, i
, buf
);
1292 /* Remove stack that had been used for the collect_t object. */
1294 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1295 append_insns (&buildaddr
, i
, buf
);
1297 /* Restore register state. */
1299 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1303 buf
[i
++] = 0x9d; /* popfq */
1304 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1305 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1306 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1307 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1308 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1309 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1310 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1311 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1312 buf
[i
++] = 0x58; /* pop %rax */
1313 buf
[i
++] = 0x5b; /* pop %rbx */
1314 buf
[i
++] = 0x59; /* pop %rcx */
1315 buf
[i
++] = 0x5a; /* pop %rdx */
1316 buf
[i
++] = 0x5e; /* pop %rsi */
1317 buf
[i
++] = 0x5f; /* pop %rdi */
1318 buf
[i
++] = 0x5d; /* pop %rbp */
1319 buf
[i
++] = 0x5c; /* pop %rsp */
1320 append_insns (&buildaddr
, i
, buf
);
1322 /* Now, adjust the original instruction to execute in the jump
1324 *adjusted_insn_addr
= buildaddr
;
1325 relocate_instruction (&buildaddr
, tpaddr
);
1326 *adjusted_insn_addr_end
= buildaddr
;
1328 /* Finally, write a jump back to the program. */
1329 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1330 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1331 memcpy (buf
+ 1, &offset
, 4);
1332 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1334 /* The jump pad is now built. Wire in a jump to our jump pad. This
1335 is always done last (by our caller actually), so that we can
1336 install fast tracepoints with threads running. This relies on
1337 the agent's atomic write support. */
1338 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1339 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1340 memcpy (buf
+ 1, &offset
, 4);
1341 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1342 *jjump_pad_insn_size
= sizeof (jump_insn
);
1344 /* Return the end address of our pad. */
1345 *jump_entry
= buildaddr
;
1350 #endif /* __x86_64__ */
1352 /* Build a jump pad that saves registers and calls a collection
1353 function. Writes a jump instruction to the jump pad to
1354 JJUMPAD_INSN. The caller is responsible to write it in at the
1355 tracepoint address. */
1358 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1359 CORE_ADDR collector
,
1362 CORE_ADDR
*jump_entry
,
1363 CORE_ADDR
*trampoline
,
1364 ULONGEST
*trampoline_size
,
1365 unsigned char *jjump_pad_insn
,
1366 ULONGEST
*jjump_pad_insn_size
,
1367 CORE_ADDR
*adjusted_insn_addr
,
1368 CORE_ADDR
*adjusted_insn_addr_end
,
1371 unsigned char buf
[0x100];
1373 CORE_ADDR buildaddr
= *jump_entry
;
1375 /* Build the jump pad. */
1377 /* First, do tracepoint data collection. Save registers. */
1379 buf
[i
++] = 0x60; /* pushad */
1380 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1381 *((int *)(buf
+ i
)) = (int) tpaddr
;
1383 buf
[i
++] = 0x9c; /* pushf */
1384 buf
[i
++] = 0x1e; /* push %ds */
1385 buf
[i
++] = 0x06; /* push %es */
1386 buf
[i
++] = 0x0f; /* push %fs */
1388 buf
[i
++] = 0x0f; /* push %gs */
1390 buf
[i
++] = 0x16; /* push %ss */
1391 buf
[i
++] = 0x0e; /* push %cs */
1392 append_insns (&buildaddr
, i
, buf
);
1394 /* Stack space for the collecting_t object. */
1396 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1398 /* Build the object. */
1399 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1400 memcpy (buf
+ i
, &tpoint
, 4);
1402 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1404 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1405 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1406 append_insns (&buildaddr
, i
, buf
);
1408 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1409 If we cared for it, this could be using xchg alternatively. */
1412 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1413 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1415 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1417 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1418 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1419 append_insns (&buildaddr
, i
, buf
);
1422 /* Set up arguments to the gdb_collect call. */
1424 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1425 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1426 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1427 append_insns (&buildaddr
, i
, buf
);
1430 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1431 append_insns (&buildaddr
, i
, buf
);
1434 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1435 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1437 append_insns (&buildaddr
, i
, buf
);
1439 buf
[0] = 0xe8; /* call <reladdr> */
1440 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1441 memcpy (buf
+ 1, &offset
, 4);
1442 append_insns (&buildaddr
, 5, buf
);
1443 /* Clean up after the call. */
1444 buf
[0] = 0x83; /* add $0x8,%esp */
1447 append_insns (&buildaddr
, 3, buf
);
1450 /* Clear the spin-lock. This would need the LOCK prefix on older
1453 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1454 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1455 memcpy (buf
+ i
, &lockaddr
, 4);
1457 append_insns (&buildaddr
, i
, buf
);
1460 /* Remove stack that had been used for the collect_t object. */
1462 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1463 append_insns (&buildaddr
, i
, buf
);
1466 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1469 buf
[i
++] = 0x17; /* pop %ss */
1470 buf
[i
++] = 0x0f; /* pop %gs */
1472 buf
[i
++] = 0x0f; /* pop %fs */
1474 buf
[i
++] = 0x07; /* pop %es */
1475 buf
[i
++] = 0x1f; /* pop %ds */
1476 buf
[i
++] = 0x9d; /* popf */
1477 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1480 buf
[i
++] = 0x61; /* popad */
1481 append_insns (&buildaddr
, i
, buf
);
1483 /* Now, adjust the original instruction to execute in the jump
1485 *adjusted_insn_addr
= buildaddr
;
1486 relocate_instruction (&buildaddr
, tpaddr
);
1487 *adjusted_insn_addr_end
= buildaddr
;
1489 /* Write the jump back to the program. */
1490 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1491 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1492 memcpy (buf
+ 1, &offset
, 4);
1493 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1495 /* The jump pad is now built. Wire in a jump to our jump pad. This
1496 is always done last (by our caller actually), so that we can
1497 install fast tracepoints with threads running. This relies on
1498 the agent's atomic write support. */
1501 /* Create a trampoline. */
1502 *trampoline_size
= sizeof (jump_insn
);
1503 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1505 /* No trampoline space available. */
1507 "E.Cannot allocate trampoline space needed for fast "
1508 "tracepoints on 4-byte instructions.");
1512 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1513 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1514 memcpy (buf
+ 1, &offset
, 4);
1515 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1517 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1518 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1519 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1520 memcpy (buf
+ 2, &offset
, 2);
1521 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1522 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1526 /* Else use a 32-bit relative jump instruction. */
1527 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1528 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1529 memcpy (buf
+ 1, &offset
, 4);
1530 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1531 *jjump_pad_insn_size
= sizeof (jump_insn
);
1534 /* Return the end address of our pad. */
1535 *jump_entry
= buildaddr
;
1541 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1542 CORE_ADDR collector
,
1545 CORE_ADDR
*jump_entry
,
1546 CORE_ADDR
*trampoline
,
1547 ULONGEST
*trampoline_size
,
1548 unsigned char *jjump_pad_insn
,
1549 ULONGEST
*jjump_pad_insn_size
,
1550 CORE_ADDR
*adjusted_insn_addr
,
1551 CORE_ADDR
*adjusted_insn_addr_end
,
1555 if (register_size (0) == 8)
1556 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1557 collector
, lockaddr
,
1558 orig_size
, jump_entry
,
1559 trampoline
, trampoline_size
,
1561 jjump_pad_insn_size
,
1563 adjusted_insn_addr_end
,
1567 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1568 collector
, lockaddr
,
1569 orig_size
, jump_entry
,
1570 trampoline
, trampoline_size
,
1572 jjump_pad_insn_size
,
1574 adjusted_insn_addr_end
,
1578 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1582 x86_get_min_fast_tracepoint_insn_len (void)
1584 static int warned_about_fast_tracepoints
= 0;
1587 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1588 used for fast tracepoints. */
1589 if (register_size (0) == 8)
1593 if (in_process_agent_loaded ())
1595 char errbuf
[IPA_BUFSIZ
];
1599 /* On x86, if trampolines are available, then 4-byte jump instructions
1600 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1601 with a 4-byte offset are used instead. */
1602 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1606 /* GDB has no channel to explain to user why a shorter fast
1607 tracepoint is not possible, but at least make GDBserver
1608 mention that something has gone awry. */
1609 if (!warned_about_fast_tracepoints
)
1611 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1612 warned_about_fast_tracepoints
= 1;
1619 /* Indicate that the minimum length is currently unknown since the IPA
1620 has not loaded yet. */
1626 add_insns (unsigned char *start
, int len
)
1628 CORE_ADDR buildaddr
= current_insn_ptr
;
1631 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1632 len
, paddress (buildaddr
));
1634 append_insns (&buildaddr
, len
, start
);
1635 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* 32-bit variant: assemble the insns in .code32 mode so a 64-bit
   gdbserver can compile bytecode for a 32-bit inferior.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       "\t" ".code64\n");					\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1679 amd64_emit_prologue (void)
1681 EMIT_ASM (amd64_prologue
,
1683 "movq %rsp,%rbp\n\t"
1684 "sub $0x20,%rsp\n\t"
1685 "movq %rdi,-8(%rbp)\n\t"
1686 "movq %rsi,-16(%rbp)");
1691 amd64_emit_epilogue (void)
1693 EMIT_ASM (amd64_epilogue
,
1694 "movq -16(%rbp),%rdi\n\t"
1695 "movq %rax,(%rdi)\n\t"
1702 amd64_emit_add (void)
1704 EMIT_ASM (amd64_add
,
1705 "add (%rsp),%rax\n\t"
1706 "lea 0x8(%rsp),%rsp");
1710 amd64_emit_sub (void)
1712 EMIT_ASM (amd64_sub
,
1713 "sub %rax,(%rsp)\n\t"
1718 amd64_emit_mul (void)
1724 amd64_emit_lsh (void)
1730 amd64_emit_rsh_signed (void)
1736 amd64_emit_rsh_unsigned (void)
1742 amd64_emit_ext (int arg
)
1747 EMIT_ASM (amd64_ext_8
,
1753 EMIT_ASM (amd64_ext_16
,
1758 EMIT_ASM (amd64_ext_32
,
1767 amd64_emit_log_not (void)
1769 EMIT_ASM (amd64_log_not
,
1770 "test %rax,%rax\n\t"
1776 amd64_emit_bit_and (void)
1778 EMIT_ASM (amd64_and
,
1779 "and (%rsp),%rax\n\t"
1780 "lea 0x8(%rsp),%rsp");
1784 amd64_emit_bit_or (void)
1787 "or (%rsp),%rax\n\t"
1788 "lea 0x8(%rsp),%rsp");
1792 amd64_emit_bit_xor (void)
1794 EMIT_ASM (amd64_xor
,
1795 "xor (%rsp),%rax\n\t"
1796 "lea 0x8(%rsp),%rsp");
1800 amd64_emit_bit_not (void)
1802 EMIT_ASM (amd64_bit_not
,
1803 "xorq $0xffffffffffffffff,%rax");
1807 amd64_emit_equal (void)
1809 EMIT_ASM (amd64_equal
,
1810 "cmp %rax,(%rsp)\n\t"
1811 "je .Lamd64_equal_true\n\t"
1813 "jmp .Lamd64_equal_end\n\t"
1814 ".Lamd64_equal_true:\n\t"
1816 ".Lamd64_equal_end:\n\t"
1817 "lea 0x8(%rsp),%rsp");
1821 amd64_emit_less_signed (void)
1823 EMIT_ASM (amd64_less_signed
,
1824 "cmp %rax,(%rsp)\n\t"
1825 "jl .Lamd64_less_signed_true\n\t"
1827 "jmp .Lamd64_less_signed_end\n\t"
1828 ".Lamd64_less_signed_true:\n\t"
1830 ".Lamd64_less_signed_end:\n\t"
1831 "lea 0x8(%rsp),%rsp");
1835 amd64_emit_less_unsigned (void)
1837 EMIT_ASM (amd64_less_unsigned
,
1838 "cmp %rax,(%rsp)\n\t"
1839 "jb .Lamd64_less_unsigned_true\n\t"
1841 "jmp .Lamd64_less_unsigned_end\n\t"
1842 ".Lamd64_less_unsigned_true:\n\t"
1844 ".Lamd64_less_unsigned_end:\n\t"
1845 "lea 0x8(%rsp),%rsp");
1849 amd64_emit_ref (int size
)
1854 EMIT_ASM (amd64_ref1
,
1858 EMIT_ASM (amd64_ref2
,
1862 EMIT_ASM (amd64_ref4
,
1863 "movl (%rax),%eax");
1866 EMIT_ASM (amd64_ref8
,
1867 "movq (%rax),%rax");
1873 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1875 EMIT_ASM (amd64_if_goto
,
1879 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1887 amd64_emit_goto (int *offset_p
, int *size_p
)
1889 EMIT_ASM (amd64_goto
,
1890 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1898 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1900 int diff
= (to
- (from
+ size
));
1901 unsigned char buf
[sizeof (int)];
1909 memcpy (buf
, &diff
, sizeof (int));
1910 write_inferior_memory (from
, buf
, sizeof (int));
1914 amd64_emit_const (LONGEST num
)
1916 unsigned char buf
[16];
1918 CORE_ADDR buildaddr
= current_insn_ptr
;
1921 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1922 memcpy (&buf
[i
], &num
, sizeof (num
));
1924 append_insns (&buildaddr
, i
, buf
);
1925 current_insn_ptr
= buildaddr
;
1929 amd64_emit_call (CORE_ADDR fn
)
1931 unsigned char buf
[16];
1933 CORE_ADDR buildaddr
;
1936 /* The destination function being in the shared library, may be
1937 >31-bits away off the compiled code pad. */
1939 buildaddr
= current_insn_ptr
;
1941 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1945 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1947 /* Offset is too large for a call. Use callq, but that requires
1948 a register, so avoid it if possible. Use r10, since it is
1949 call-clobbered, we don't have to push/pop it. */
1950 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1952 memcpy (buf
+ i
, &fn
, 8);
1954 buf
[i
++] = 0xff; /* callq *%r10 */
1959 int offset32
= offset64
; /* we know we can't overflow here. */
1960 memcpy (buf
+ i
, &offset32
, 4);
1964 append_insns (&buildaddr
, i
, buf
);
1965 current_insn_ptr
= buildaddr
;
1969 amd64_emit_reg (int reg
)
1971 unsigned char buf
[16];
1973 CORE_ADDR buildaddr
;
1975 /* Assume raw_regs is still in %rdi. */
1976 buildaddr
= current_insn_ptr
;
1978 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1979 memcpy (&buf
[i
], ®
, sizeof (reg
));
1981 append_insns (&buildaddr
, i
, buf
);
1982 current_insn_ptr
= buildaddr
;
1983 amd64_emit_call (get_raw_reg_func_addr ());
1987 amd64_emit_pop (void)
1989 EMIT_ASM (amd64_pop
,
1994 amd64_emit_stack_flush (void)
1996 EMIT_ASM (amd64_stack_flush
,
2001 amd64_emit_zero_ext (int arg
)
2006 EMIT_ASM (amd64_zero_ext_8
,
2010 EMIT_ASM (amd64_zero_ext_16
,
2011 "and $0xffff,%rax");
2014 EMIT_ASM (amd64_zero_ext_32
,
2015 "mov $0xffffffff,%rcx\n\t"
2024 amd64_emit_swap (void)
2026 EMIT_ASM (amd64_swap
,
2033 amd64_emit_stack_adjust (int n
)
2035 unsigned char buf
[16];
2037 CORE_ADDR buildaddr
= current_insn_ptr
;
2040 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2044 /* This only handles adjustments up to 16, but we don't expect any more. */
2046 append_insns (&buildaddr
, i
, buf
);
2047 current_insn_ptr
= buildaddr
;
2050 /* FN's prototype is `LONGEST(*fn)(int)'. */
2053 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2055 unsigned char buf
[16];
2057 CORE_ADDR buildaddr
;
2059 buildaddr
= current_insn_ptr
;
2061 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2062 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2064 append_insns (&buildaddr
, i
, buf
);
2065 current_insn_ptr
= buildaddr
;
2066 amd64_emit_call (fn
);
2069 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2072 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2074 unsigned char buf
[16];
2076 CORE_ADDR buildaddr
;
2078 buildaddr
= current_insn_ptr
;
2080 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2081 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2083 append_insns (&buildaddr
, i
, buf
);
2084 current_insn_ptr
= buildaddr
;
2085 EMIT_ASM (amd64_void_call_2_a
,
2086 /* Save away a copy of the stack top. */
2088 /* Also pass top as the second argument. */
2090 amd64_emit_call (fn
);
2091 EMIT_ASM (amd64_void_call_2_b
,
2092 /* Restore the stack top, %rax may have been trashed. */
2097 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2100 "cmp %rax,(%rsp)\n\t"
2101 "jne .Lamd64_eq_fallthru\n\t"
2102 "lea 0x8(%rsp),%rsp\n\t"
2104 /* jmp, but don't trust the assembler to choose the right jump */
2105 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2106 ".Lamd64_eq_fallthru:\n\t"
2107 "lea 0x8(%rsp),%rsp\n\t"
2117 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2120 "cmp %rax,(%rsp)\n\t"
2121 "je .Lamd64_ne_fallthru\n\t"
2122 "lea 0x8(%rsp),%rsp\n\t"
2124 /* jmp, but don't trust the assembler to choose the right jump */
2125 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2126 ".Lamd64_ne_fallthru:\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2137 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2140 "cmp %rax,(%rsp)\n\t"
2141 "jnl .Lamd64_lt_fallthru\n\t"
2142 "lea 0x8(%rsp),%rsp\n\t"
2144 /* jmp, but don't trust the assembler to choose the right jump */
2145 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2146 ".Lamd64_lt_fallthru:\n\t"
2147 "lea 0x8(%rsp),%rsp\n\t"
2157 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2160 "cmp %rax,(%rsp)\n\t"
2161 "jnle .Lamd64_le_fallthru\n\t"
2162 "lea 0x8(%rsp),%rsp\n\t"
2164 /* jmp, but don't trust the assembler to choose the right jump */
2165 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2166 ".Lamd64_le_fallthru:\n\t"
2167 "lea 0x8(%rsp),%rsp\n\t"
2177 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2180 "cmp %rax,(%rsp)\n\t"
2181 "jng .Lamd64_gt_fallthru\n\t"
2182 "lea 0x8(%rsp),%rsp\n\t"
2184 /* jmp, but don't trust the assembler to choose the right jump */
2185 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2186 ".Lamd64_gt_fallthru:\n\t"
2187 "lea 0x8(%rsp),%rsp\n\t"
2197 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2200 "cmp %rax,(%rsp)\n\t"
2201 "jnge .Lamd64_ge_fallthru\n\t"
2202 ".Lamd64_ge_jump:\n\t"
2203 "lea 0x8(%rsp),%rsp\n\t"
2205 /* jmp, but don't trust the assembler to choose the right jump */
2206 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2207 ".Lamd64_ge_fallthru:\n\t"
2208 "lea 0x8(%rsp),%rsp\n\t"
2217 struct emit_ops amd64_emit_ops
=
2219 amd64_emit_prologue
,
2220 amd64_emit_epilogue
,
2225 amd64_emit_rsh_signed
,
2226 amd64_emit_rsh_unsigned
,
2234 amd64_emit_less_signed
,
2235 amd64_emit_less_unsigned
,
2239 amd64_write_goto_address
,
2244 amd64_emit_stack_flush
,
2245 amd64_emit_zero_ext
,
2247 amd64_emit_stack_adjust
,
2248 amd64_emit_int_call_1
,
2249 amd64_emit_void_call_2
,
2258 #endif /* __x86_64__ */
2261 i386_emit_prologue (void)
2263 EMIT_ASM32 (i386_prologue
,
2267 /* At this point, the raw regs base address is at 8(%ebp), and the
2268 value pointer is at 12(%ebp). */
2272 i386_emit_epilogue (void)
2274 EMIT_ASM32 (i386_epilogue
,
2275 "mov 12(%ebp),%ecx\n\t"
2276 "mov %eax,(%ecx)\n\t"
2277 "mov %ebx,0x4(%ecx)\n\t"
2285 i386_emit_add (void)
2287 EMIT_ASM32 (i386_add
,
2288 "add (%esp),%eax\n\t"
2289 "adc 0x4(%esp),%ebx\n\t"
2290 "lea 0x8(%esp),%esp");
2294 i386_emit_sub (void)
2296 EMIT_ASM32 (i386_sub
,
2297 "subl %eax,(%esp)\n\t"
2298 "sbbl %ebx,4(%esp)\n\t"
2304 i386_emit_mul (void)
2310 i386_emit_lsh (void)
2316 i386_emit_rsh_signed (void)
2322 i386_emit_rsh_unsigned (void)
2328 i386_emit_ext (int arg
)
2333 EMIT_ASM32 (i386_ext_8
,
2336 "movl %eax,%ebx\n\t"
2340 EMIT_ASM32 (i386_ext_16
,
2342 "movl %eax,%ebx\n\t"
2346 EMIT_ASM32 (i386_ext_32
,
2347 "movl %eax,%ebx\n\t"
2356 i386_emit_log_not (void)
2358 EMIT_ASM32 (i386_log_not
,
2360 "test %eax,%eax\n\t"
2367 i386_emit_bit_and (void)
2369 EMIT_ASM32 (i386_and
,
2370 "and (%esp),%eax\n\t"
2371 "and 0x4(%esp),%ebx\n\t"
2372 "lea 0x8(%esp),%esp");
2376 i386_emit_bit_or (void)
2378 EMIT_ASM32 (i386_or
,
2379 "or (%esp),%eax\n\t"
2380 "or 0x4(%esp),%ebx\n\t"
2381 "lea 0x8(%esp),%esp");
2385 i386_emit_bit_xor (void)
2387 EMIT_ASM32 (i386_xor
,
2388 "xor (%esp),%eax\n\t"
2389 "xor 0x4(%esp),%ebx\n\t"
2390 "lea 0x8(%esp),%esp");
2394 i386_emit_bit_not (void)
2396 EMIT_ASM32 (i386_bit_not
,
2397 "xor $0xffffffff,%eax\n\t"
2398 "xor $0xffffffff,%ebx\n\t");
2402 i386_emit_equal (void)
2404 EMIT_ASM32 (i386_equal
,
2405 "cmpl %ebx,4(%esp)\n\t"
2406 "jne .Li386_equal_false\n\t"
2407 "cmpl %eax,(%esp)\n\t"
2408 "je .Li386_equal_true\n\t"
2409 ".Li386_equal_false:\n\t"
2411 "jmp .Li386_equal_end\n\t"
2412 ".Li386_equal_true:\n\t"
2414 ".Li386_equal_end:\n\t"
2416 "lea 0x8(%esp),%esp");
2420 i386_emit_less_signed (void)
2422 EMIT_ASM32 (i386_less_signed
,
2423 "cmpl %ebx,4(%esp)\n\t"
2424 "jl .Li386_less_signed_true\n\t"
2425 "jne .Li386_less_signed_false\n\t"
2426 "cmpl %eax,(%esp)\n\t"
2427 "jl .Li386_less_signed_true\n\t"
2428 ".Li386_less_signed_false:\n\t"
2430 "jmp .Li386_less_signed_end\n\t"
2431 ".Li386_less_signed_true:\n\t"
2433 ".Li386_less_signed_end:\n\t"
2435 "lea 0x8(%esp),%esp");
2439 i386_emit_less_unsigned (void)
2441 EMIT_ASM32 (i386_less_unsigned
,
2442 "cmpl %ebx,4(%esp)\n\t"
2443 "jb .Li386_less_unsigned_true\n\t"
2444 "jne .Li386_less_unsigned_false\n\t"
2445 "cmpl %eax,(%esp)\n\t"
2446 "jb .Li386_less_unsigned_true\n\t"
2447 ".Li386_less_unsigned_false:\n\t"
2449 "jmp .Li386_less_unsigned_end\n\t"
2450 ".Li386_less_unsigned_true:\n\t"
2452 ".Li386_less_unsigned_end:\n\t"
2454 "lea 0x8(%esp),%esp");
2458 i386_emit_ref (int size
)
2463 EMIT_ASM32 (i386_ref1
,
2467 EMIT_ASM32 (i386_ref2
,
2471 EMIT_ASM32 (i386_ref4
,
2472 "movl (%eax),%eax");
2475 EMIT_ASM32 (i386_ref8
,
2476 "movl 4(%eax),%ebx\n\t"
2477 "movl (%eax),%eax");
2483 i386_emit_if_goto (int *offset_p
, int *size_p
)
2485 EMIT_ASM32 (i386_if_goto
,
2491 /* Don't trust the assembler to choose the right jump */
2492 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2495 *offset_p
= 11; /* be sure that this matches the sequence above */
2501 i386_emit_goto (int *offset_p
, int *size_p
)
2503 EMIT_ASM32 (i386_goto
,
2504 /* Don't trust the assembler to choose the right jump */
2505 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2513 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2515 int diff
= (to
- (from
+ size
));
2516 unsigned char buf
[sizeof (int)];
2518 /* We're only doing 4-byte sizes at the moment. */
2525 memcpy (buf
, &diff
, sizeof (int));
2526 write_inferior_memory (from
, buf
, sizeof (int));
2530 i386_emit_const (LONGEST num
)
2532 unsigned char buf
[16];
2534 CORE_ADDR buildaddr
= current_insn_ptr
;
2537 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2538 lo
= num
& 0xffffffff;
2539 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2541 hi
= ((num
>> 32) & 0xffffffff);
2544 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2545 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2550 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2552 append_insns (&buildaddr
, i
, buf
);
2553 current_insn_ptr
= buildaddr
;
2557 i386_emit_call (CORE_ADDR fn
)
2559 unsigned char buf
[16];
2561 CORE_ADDR buildaddr
;
2563 buildaddr
= current_insn_ptr
;
2565 buf
[i
++] = 0xe8; /* call <reladdr> */
2566 offset
= ((int) fn
) - (buildaddr
+ 5);
2567 memcpy (buf
+ 1, &offset
, 4);
2568 append_insns (&buildaddr
, 5, buf
);
2569 current_insn_ptr
= buildaddr
;
2573 i386_emit_reg (int reg
)
2575 unsigned char buf
[16];
2577 CORE_ADDR buildaddr
;
2579 EMIT_ASM32 (i386_reg_a
,
2581 buildaddr
= current_insn_ptr
;
2583 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2584 memcpy (&buf
[i
], ®
, sizeof (reg
));
2586 append_insns (&buildaddr
, i
, buf
);
2587 current_insn_ptr
= buildaddr
;
2588 EMIT_ASM32 (i386_reg_b
,
2589 "mov %eax,4(%esp)\n\t"
2590 "mov 8(%ebp),%eax\n\t"
2592 i386_emit_call (get_raw_reg_func_addr ());
2593 EMIT_ASM32 (i386_reg_c
,
2595 "lea 0x8(%esp),%esp");
2599 i386_emit_pop (void)
2601 EMIT_ASM32 (i386_pop
,
2607 i386_emit_stack_flush (void)
2609 EMIT_ASM32 (i386_stack_flush
,
2615 i386_emit_zero_ext (int arg
)
2620 EMIT_ASM32 (i386_zero_ext_8
,
2621 "and $0xff,%eax\n\t"
2625 EMIT_ASM32 (i386_zero_ext_16
,
2626 "and $0xffff,%eax\n\t"
2630 EMIT_ASM32 (i386_zero_ext_32
,
2639 i386_emit_swap (void)
2641 EMIT_ASM32 (i386_swap
,
2651 i386_emit_stack_adjust (int n
)
2653 unsigned char buf
[16];
2655 CORE_ADDR buildaddr
= current_insn_ptr
;
2658 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2662 append_insns (&buildaddr
, i
, buf
);
2663 current_insn_ptr
= buildaddr
;
2666 /* FN's prototype is `LONGEST(*fn)(int)'. */
2669 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2671 unsigned char buf
[16];
2673 CORE_ADDR buildaddr
;
2675 EMIT_ASM32 (i386_int_call_1_a
,
2676 /* Reserve a bit of stack space. */
2678 /* Put the one argument on the stack. */
2679 buildaddr
= current_insn_ptr
;
2681 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2684 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2686 append_insns (&buildaddr
, i
, buf
);
2687 current_insn_ptr
= buildaddr
;
2688 i386_emit_call (fn
);
2689 EMIT_ASM32 (i386_int_call_1_c
,
2691 "lea 0x8(%esp),%esp");
2694 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2697 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2699 unsigned char buf
[16];
2701 CORE_ADDR buildaddr
;
2703 EMIT_ASM32 (i386_void_call_2_a
,
2704 /* Preserve %eax only; we don't have to worry about %ebx. */
2706 /* Reserve a bit of stack space for arguments. */
2707 "sub $0x10,%esp\n\t"
2708 /* Copy "top" to the second argument position. (Note that
2709 we can't assume function won't scribble on its
2710 arguments, so don't try to restore from this.) */
2711 "mov %eax,4(%esp)\n\t"
2712 "mov %ebx,8(%esp)");
2713 /* Put the first argument on the stack. */
2714 buildaddr
= current_insn_ptr
;
2716 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2719 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2721 append_insns (&buildaddr
, i
, buf
);
2722 current_insn_ptr
= buildaddr
;
2723 i386_emit_call (fn
);
2724 EMIT_ASM32 (i386_void_call_2_b
,
2725 "lea 0x10(%esp),%esp\n\t"
2726 /* Restore original stack top. */
2732 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2735 /* Check low half first, more likely to be decider */
2736 "cmpl %eax,(%esp)\n\t"
2737 "jne .Leq_fallthru\n\t"
2738 "cmpl %ebx,4(%esp)\n\t"
2739 "jne .Leq_fallthru\n\t"
2740 "lea 0x8(%esp),%esp\n\t"
2743 /* jmp, but don't trust the assembler to choose the right jump */
2744 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2745 ".Leq_fallthru:\n\t"
2746 "lea 0x8(%esp),%esp\n\t"
2757 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2760 /* Check low half first, more likely to be decider */
2761 "cmpl %eax,(%esp)\n\t"
2763 "cmpl %ebx,4(%esp)\n\t"
2764 "je .Lne_fallthru\n\t"
2766 "lea 0x8(%esp),%esp\n\t"
2769 /* jmp, but don't trust the assembler to choose the right jump */
2770 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2771 ".Lne_fallthru:\n\t"
2772 "lea 0x8(%esp),%esp\n\t"
2783 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2786 "cmpl %ebx,4(%esp)\n\t"
2788 "jne .Llt_fallthru\n\t"
2789 "cmpl %eax,(%esp)\n\t"
2790 "jnl .Llt_fallthru\n\t"
2792 "lea 0x8(%esp),%esp\n\t"
2795 /* jmp, but don't trust the assembler to choose the right jump */
2796 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2797 ".Llt_fallthru:\n\t"
2798 "lea 0x8(%esp),%esp\n\t"
2809 i386_emit_le_goto (int *offset_p
, int *size_p
)
2812 "cmpl %ebx,4(%esp)\n\t"
2814 "jne .Lle_fallthru\n\t"
2815 "cmpl %eax,(%esp)\n\t"
2816 "jnle .Lle_fallthru\n\t"
2818 "lea 0x8(%esp),%esp\n\t"
2821 /* jmp, but don't trust the assembler to choose the right jump */
2822 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2823 ".Lle_fallthru:\n\t"
2824 "lea 0x8(%esp),%esp\n\t"
2835 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2838 "cmpl %ebx,4(%esp)\n\t"
2840 "jne .Lgt_fallthru\n\t"
2841 "cmpl %eax,(%esp)\n\t"
2842 "jng .Lgt_fallthru\n\t"
2844 "lea 0x8(%esp),%esp\n\t"
2847 /* jmp, but don't trust the assembler to choose the right jump */
2848 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2849 ".Lgt_fallthru:\n\t"
2850 "lea 0x8(%esp),%esp\n\t"
2861 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2864 "cmpl %ebx,4(%esp)\n\t"
2866 "jne .Lge_fallthru\n\t"
2867 "cmpl %eax,(%esp)\n\t"
2868 "jnge .Lge_fallthru\n\t"
2870 "lea 0x8(%esp),%esp\n\t"
2873 /* jmp, but don't trust the assembler to choose the right jump */
2874 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2875 ".Lge_fallthru:\n\t"
2876 "lea 0x8(%esp),%esp\n\t"
2886 struct emit_ops i386_emit_ops
=
2894 i386_emit_rsh_signed
,
2895 i386_emit_rsh_unsigned
,
2903 i386_emit_less_signed
,
2904 i386_emit_less_unsigned
,
2908 i386_write_goto_address
,
2913 i386_emit_stack_flush
,
2916 i386_emit_stack_adjust
,
2917 i386_emit_int_call_1
,
2918 i386_emit_void_call_2
,
2928 static struct emit_ops
*
2932 int use_64bit
= register_size (0) == 8;
2935 return &amd64_emit_ops
;
2938 return &i386_emit_ops
;
2941 /* This is initialized assuming an amd64 target.
2942 x86_arch_setup will correct it for i386 or amd64 targets. */
2944 struct linux_target_ops the_low_target
=
2960 x86_stopped_by_watchpoint
,
2961 x86_stopped_data_address
,
2962 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2963 native i386 case (no registers smaller than an xfer unit), and are not
2964 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2967 /* need to fix up i386 siginfo if host is amd64 */
2969 x86_linux_new_process
,
2970 x86_linux_new_thread
,
2971 x86_linux_prepare_to_resume
,
2972 x86_linux_process_qsupported
,
2973 x86_supports_tracepoints
,
2974 x86_get_thread_area
,
2975 x86_install_fast_tracepoint_jump_pad
,
2977 x86_get_min_fast_tracepoint_insn_len
,