/* GNU/Linux/x86-64 specific low level interface, for the remote server
   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"
/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
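/* Illustrative sketch (not part of the original source): the zeroed
   displacement bytes in JUMP_INSN are filled in later, when a jump pad
   is wired up, with a 32-bit relative offset computed from the jump
   site to the target, along these lines:

     offset = target - (jump_addr + sizeof (jump_insn));
     memcpy (buf, jump_insn, sizeof (jump_insn));
     memcpy (buf + 1, &offset, 4);

   SMALL_JUMP_INSN works the same way, with a 16-bit displacement
   stored at offset 2.  */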
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/ptrace.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif /* ! __x86_64__ */
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    *(int *)base = desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
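/* Illustrative note (not part of the original source): the zero SIZE in the
   PTRACE_GETREGSET entry above is a placeholder.  x86_linux_update_xmltarget
   later walks this table and patches in the real xstate buffer size once
   XCR0 is known, roughly:

     for (regset = target_regsets; regset->fill_function != NULL; regset++)
       if (regset->get_request == PTRACE_GETREGSET)
         regset->size = I386_XSTATE_SIZE (xcr0);
*/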
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved with some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2': case '3': case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case '0':
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '2': case '3': case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

#ifdef __x86_64__

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct { unsigned int _pid; unsigned int _uid; } _kill;

    /* POSIX.1b timers */
    struct { compat_timer_t _tid; int _overrun; compat_sigval_t _sigval; } _timer;

    /* POSIX.1b signals */
    struct { unsigned int _pid; unsigned int _uid; compat_sigval_t _sigval; } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct { unsigned int _addr; } _sigfault;

    /* SIGPOLL */
    struct { int _band; int _fd; } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	case SIGTRAP:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	case SIGTRAP:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		regset->size = 0;
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else
	    init_registers_amd64_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  int use_64bit = linux_pid_exe_is_elf_64_file (pid);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

/* Convert a string of space-separated hexadecimal opcode bytes into
   raw bytes at BUF.  Return the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
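/* Illustrative usage (not part of the original source): the jump pad
   builders below accumulate machine code by appending hex-encoded opcode
   strings, e.g.

     unsigned char buf[40];
     int i = 0;
     i += push_opcode (&buf[i], "48 89 e6");     (mov %rsp,%rsi)
     i += push_opcode (&buf[i], "48 83 c6 18");  (add $0x18,%rsi)
     append_insns (&buildaddr, i, buf);
*/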
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */
  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   <lockaddr>,%ecx */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     machines.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
	     len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
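/* Illustrative usage (not part of the original source; the emitter name
   here is hypothetical): each emitter below wraps a block of inline asm in
   EMIT_ASM, whose start_<NAME>/end_<NAME> labels delimit the bytes that
   add_insns copies into the inferior at current_insn_ptr, e.g.

     static void
     sample_emit_nop (void)
     {
       EMIT_ASM (sample_nop, "nop");
     }
*/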
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %esi\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %esi\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
  int use_64bit = register_size (0) == 8;

#ifdef __x86_64__
  if (use_64bit)
    return &amd64_emit_ops;
#endif

  return &i386_emit_ops;
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
};