1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
/* Target descriptions and register initializers generated by the GDB
   build from XML feature files.  The amd64/x32 variants only exist in
   64-bit builds.  NOTE(review): the __x86_64__ guards were dropped by
   the mangled extraction and restored here from the upstream file --
   confirm against the original tree.  */

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;
#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

/* Fallback descriptions used when the connected GDB has no XML
   support; filled in lazily from the xmltarget_* strings below.  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;
/* Templates for the jump instructions written into fast-tracepoint
   jump pads: a 5-byte rel32 jmp (0xe9) and a 4-byte operand-size
   prefixed rel16 jmp (0x66 0xe9).  The displacement bytes are patched
   in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  Minimal target
   descriptions sent verbatim (the leading '@' marks a literal reply).
   The closing "</target>" was truncated in the mangled source and is
   restored here -- without it the string literal never terminates.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include <sys/ptrace.h>

/* Fallbacks for old system headers: request numbers come from the
   kernel ABI and are stable.  The #endif lines were dropped by the
   mangled extraction and are restored here.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
136 /* Per-process arch-specific data we want to keep. */
138 struct arch_process_info
140 struct i386_debug_reg_state debug_reg_state
;
143 /* Per-thread arch-specific data we want to keep. */
147 /* Non-zero if our copy differs from what's recorded in the thread. */
148 int debug_registers_changed
;
153 /* Mapping between the general-purpose registers in `struct user'
154 format and GDB's register array layout.
155 Note that the transfer layout uses 64-bit regs. */
156 static /*const*/ int i386_regmap
[] =
158 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
159 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
160 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
161 DS
* 8, ES
* 8, FS
* 8, GS
* 8
164 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
166 /* So code below doesn't have to care, i386 or amd64. */
167 #define ORIG_EAX ORIG_RAX
169 static const int x86_64_regmap
[] =
171 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
172 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
173 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
174 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
175 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
176 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1, -1, -1, -1, -1, -1, -1, -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
184 -1, -1 /* MPX registers BNDCFGU, BNDSTATUS. */
187 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#ifndef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  Offsets are in 4-byte
   `struct user_regs_struct' slots.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif /* !__x86_64__ */
207 /* Returns true if the current inferior belongs to a x86-64 process,
211 is_64bit_tdesc (void)
213 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
215 return register_size (regcache
->tdesc
, 0) == 8;
221 /* Called by libthread_db. */
224 ps_get_thread_area (const struct ps_prochandle
*ph
,
225 lwpid_t lwpid
, int idx
, void **base
)
228 int use_64bit
= is_64bit_tdesc ();
235 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
239 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
250 unsigned int desc
[4];
252 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
253 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
256 /* Ensure we properly extend the value to 64-bits for x86_64. */
257 *base
= (void *) (uintptr_t) desc
[1];
262 /* Get the thread area address. This is used to recognize which
263 thread is which when tracing with the in-process agent library. We
264 don't read anything from the address, and treat it as opaque; it's
265 the address itself that we assume is unique per-thread. */
268 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
271 int use_64bit
= is_64bit_tdesc ();
276 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
278 *addr
= (CORE_ADDR
) (uintptr_t) base
;
287 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
288 struct thread_info
*thr
= get_lwp_thread (lwp
);
289 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
290 unsigned int desc
[4];
292 const int reg_thread_area
= 3; /* bits to scale down register value. */
295 collect_register_by_name (regcache
, "gs", &gs
);
297 idx
= gs
>> reg_thread_area
;
299 if (ptrace (PTRACE_GET_THREAD_AREA
,
301 (void *) (long) idx
, (unsigned long) &desc
) < 0)
312 x86_cannot_store_register (int regno
)
315 if (is_64bit_tdesc ())
319 return regno
>= I386_NUM_REGS
;
323 x86_cannot_fetch_register (int regno
)
326 if (is_64bit_tdesc ())
330 return regno
>= I386_NUM_REGS
;
334 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
339 if (register_size (regcache
->tdesc
, 0) == 8)
341 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
342 if (x86_64_regmap
[i
] != -1)
343 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
348 for (i
= 0; i
< I386_NUM_REGS
; i
++)
349 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
351 collect_register_by_name (regcache
, "orig_eax",
352 ((char *) buf
) + ORIG_EAX
* 4);
356 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
361 if (register_size (regcache
->tdesc
, 0) == 8)
363 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
364 if (x86_64_regmap
[i
] != -1)
365 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
370 for (i
= 0; i
< I386_NUM_REGS
; i
++)
371 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
373 supply_register_by_name (regcache
, "orig_eax",
374 ((char *) buf
) + ORIG_EAX
* 4);
/* FP/extended register set transfer helpers: thin wrappers that
   dispatch to the i387 fsave/fxsave/xsave converters.  On amd64 the
   fpregset is always fxsave-format; 32-bit uses legacy fsave.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
425 /* ??? The non-biarch i386 case stores all the i387 regs twice.
426 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
427 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
428 doesn't work. IWBN to avoid the duplication in the case where it
429 does work. Maybe the arch_setup routine could check whether it works
430 and update the supported regsets accordingly. */
432 static struct regset_info x86_regsets
[] =
434 #ifdef HAVE_PTRACE_GETREGS
435 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
437 x86_fill_gregset
, x86_store_gregset
},
438 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
439 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
441 # ifdef HAVE_PTRACE_GETFPXREGS
442 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
444 x86_fill_fpxregset
, x86_store_fpxregset
},
447 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
449 x86_fill_fpregset
, x86_store_fpregset
},
450 #endif /* HAVE_PTRACE_GETREGS */
451 { 0, 0, 0, -1, -1, NULL
, NULL
}
455 x86_get_pc (struct regcache
*regcache
)
457 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
462 collect_register_by_name (regcache
, "rip", &pc
);
463 return (CORE_ADDR
) pc
;
468 collect_register_by_name (regcache
, "eip", &pc
);
469 return (CORE_ADDR
) pc
;
474 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
476 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
480 unsigned long newpc
= pc
;
481 supply_register_by_name (regcache
, "rip", &newpc
);
485 unsigned int newpc
= pc
;
486 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3 (0xCC), one byte on x86.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
494 x86_breakpoint_at (CORE_ADDR pc
)
498 (*the_target
->read_memory
) (pc
, &c
, 1);
505 /* Support for debug registers. */
508 x86_linux_dr_get (ptid_t ptid
, int regnum
)
513 tid
= ptid_get_lwp (ptid
);
516 value
= ptrace (PTRACE_PEEKUSER
, tid
,
517 offsetof (struct user
, u_debugreg
[regnum
]), 0);
519 error ("Couldn't read debug register");
525 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
529 tid
= ptid_get_lwp (ptid
);
532 ptrace (PTRACE_POKEUSER
, tid
,
533 offsetof (struct user
, u_debugreg
[regnum
]), value
);
535 error ("Couldn't write debug register");
539 update_debug_registers_callback (struct inferior_list_entry
*entry
,
542 struct thread_info
*thr
= (struct thread_info
*) entry
;
543 struct lwp_info
*lwp
= get_thread_lwp (thr
);
544 int pid
= *(int *) pid_p
;
546 /* Only update the threads of this process. */
547 if (pid_of (thr
) == pid
)
549 /* The actual update is done later just before resuming the lwp,
550 we just mark that the registers need updating. */
551 lwp
->arch_private
->debug_registers_changed
= 1;
553 /* If the lwp isn't stopped, force it to momentarily pause, so
554 we can update its debug registers. */
556 linux_stop_lwp (lwp
);
562 /* Update the inferior's debug register REGNUM from STATE. */
565 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
567 /* Only update the threads of this process. */
568 int pid
= pid_of (current_inferior
);
570 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
571 fatal ("Invalid debug register %d", regnum
);
573 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
576 /* Return the inferior's debug register REGNUM. */
579 i386_dr_low_get_addr (int regnum
)
581 ptid_t ptid
= ptid_of (current_inferior
);
583 /* DR6 and DR7 are retrieved with some other way. */
584 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
586 return x86_linux_dr_get (ptid
, regnum
);
589 /* Update the inferior's DR7 debug control register from STATE. */
592 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
594 /* Only update the threads of this process. */
595 int pid
= pid_of (current_inferior
);
597 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
600 /* Return the inferior's DR7 debug control register. */
603 i386_dr_low_get_control (void)
605 ptid_t ptid
= ptid_of (current_inferior
);
607 return x86_linux_dr_get (ptid
, DR_CONTROL
);
610 /* Get the value of the DR6 debug status register from the inferior
611 and record it in STATE. */
614 i386_dr_low_get_status (void)
616 ptid_t ptid
= ptid_of (current_inferior
);
618 return x86_linux_dr_get (ptid
, DR_STATUS
);
621 /* Breakpoint/Watchpoint support. */
624 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
626 struct process_info
*proc
= current_process ();
629 case '0': /* software-breakpoint */
633 ret
= prepare_to_access_memory ();
636 ret
= set_gdb_breakpoint_at (addr
);
637 done_accessing_memory ();
640 case '1': /* hardware-breakpoint */
641 case '2': /* write watchpoint */
642 case '3': /* read watchpoint */
643 case '4': /* access watchpoint */
645 enum target_hw_bp_type hw_type
= Z_packet_to_hw_type (type
);
646 struct i386_debug_reg_state
*state
647 = &proc
->private->arch_private
->debug_reg_state
;
649 return i386_low_insert_watchpoint (state
, hw_type
, addr
, len
);
659 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
661 struct process_info
*proc
= current_process ();
664 case '0': /* software-breakpoint */
668 ret
= prepare_to_access_memory ();
671 ret
= delete_gdb_breakpoint_at (addr
);
672 done_accessing_memory ();
675 case '1': /* hardware-breakpoint */
676 case '2': /* write watchpoint */
677 case '3': /* read watchpoint */
678 case '4': /* access watchpoint */
680 enum target_hw_bp_type hw_type
= Z_packet_to_hw_type (type
);
681 struct i386_debug_reg_state
*state
682 = &proc
->private->arch_private
->debug_reg_state
;
684 return i386_low_remove_watchpoint (state
, hw_type
, addr
, len
);
693 x86_stopped_by_watchpoint (void)
695 struct process_info
*proc
= current_process ();
696 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
700 x86_stopped_data_address (void)
702 struct process_info
*proc
= current_process ();
704 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
710 /* Called when a new process is created. */
712 static struct arch_process_info
*
713 x86_linux_new_process (void)
715 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
717 i386_low_init_dregs (&info
->debug_reg_state
);
722 /* Called when a new thread is detected. */
724 static struct arch_lwp_info
*
725 x86_linux_new_thread (void)
727 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
729 info
->debug_registers_changed
= 1;
734 /* Called when resuming a thread.
735 If the debug regs have changed, update the thread's copies. */
738 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
740 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
741 int clear_status
= 0;
743 if (lwp
->arch_private
->debug_registers_changed
)
746 int pid
= ptid_get_pid (ptid
);
747 struct process_info
*proc
= find_process_pid (pid
);
748 struct i386_debug_reg_state
*state
749 = &proc
->private->arch_private
->debug_reg_state
;
751 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
752 if (state
->dr_ref_count
[i
] > 0)
754 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
756 /* If we're setting a watchpoint, any change the inferior
757 had done itself to the debug registers needs to be
758 discarded, otherwise, i386_low_stopped_data_address can
763 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
765 lwp
->arch_private
->debug_registers_changed
= 0;
768 if (clear_status
|| lwp
->stopped_by_watchpoint
)
769 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
772 /* When GDBSERVER is built as a 64-bit application on linux, the
773 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
774 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
775 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
776 conversion in-place ourselves. */
778 /* These types below (compat_*) define a siginfo type that is layout
779 compatible with the siginfo type exported by the 32-bit userspace
784 typedef int compat_int_t
;
785 typedef unsigned int compat_uptr_t
;
787 typedef int compat_time_t
;
788 typedef int compat_timer_t
;
789 typedef int compat_clock_t
;
791 struct compat_timeval
793 compat_time_t tv_sec
;
797 typedef union compat_sigval
799 compat_int_t sival_int
;
800 compat_uptr_t sival_ptr
;
803 typedef struct compat_siginfo
811 int _pad
[((128 / sizeof (int)) - 3)];
820 /* POSIX.1b timers */
825 compat_sigval_t _sigval
;
828 /* POSIX.1b signals */
833 compat_sigval_t _sigval
;
842 compat_clock_t _utime
;
843 compat_clock_t _stime
;
846 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
861 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
862 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
864 typedef struct compat_x32_siginfo
872 int _pad
[((128 / sizeof (int)) - 3)];
881 /* POSIX.1b timers */
886 compat_sigval_t _sigval
;
889 /* POSIX.1b signals */
894 compat_sigval_t _sigval
;
903 compat_x32_clock_t _utime
;
904 compat_x32_clock_t _stime
;
907 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
920 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
/* Shorthand accessors into the compat siginfo unions.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  The #ifndef/#endif guards
   were dropped by the mangled extraction and are restored here.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
944 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
946 memset (to
, 0, sizeof (*to
));
948 to
->si_signo
= from
->si_signo
;
949 to
->si_errno
= from
->si_errno
;
950 to
->si_code
= from
->si_code
;
952 if (to
->si_code
== SI_TIMER
)
954 to
->cpt_si_timerid
= from
->si_timerid
;
955 to
->cpt_si_overrun
= from
->si_overrun
;
956 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
958 else if (to
->si_code
== SI_USER
)
960 to
->cpt_si_pid
= from
->si_pid
;
961 to
->cpt_si_uid
= from
->si_uid
;
963 else if (to
->si_code
< 0)
965 to
->cpt_si_pid
= from
->si_pid
;
966 to
->cpt_si_uid
= from
->si_uid
;
967 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
971 switch (to
->si_signo
)
974 to
->cpt_si_pid
= from
->si_pid
;
975 to
->cpt_si_uid
= from
->si_uid
;
976 to
->cpt_si_status
= from
->si_status
;
977 to
->cpt_si_utime
= from
->si_utime
;
978 to
->cpt_si_stime
= from
->si_stime
;
984 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
987 to
->cpt_si_band
= from
->si_band
;
988 to
->cpt_si_fd
= from
->si_fd
;
991 to
->cpt_si_pid
= from
->si_pid
;
992 to
->cpt_si_uid
= from
->si_uid
;
993 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1000 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1002 memset (to
, 0, sizeof (*to
));
1004 to
->si_signo
= from
->si_signo
;
1005 to
->si_errno
= from
->si_errno
;
1006 to
->si_code
= from
->si_code
;
1008 if (to
->si_code
== SI_TIMER
)
1010 to
->si_timerid
= from
->cpt_si_timerid
;
1011 to
->si_overrun
= from
->cpt_si_overrun
;
1012 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1014 else if (to
->si_code
== SI_USER
)
1016 to
->si_pid
= from
->cpt_si_pid
;
1017 to
->si_uid
= from
->cpt_si_uid
;
1019 else if (to
->si_code
< 0)
1021 to
->si_pid
= from
->cpt_si_pid
;
1022 to
->si_uid
= from
->cpt_si_uid
;
1023 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1027 switch (to
->si_signo
)
1030 to
->si_pid
= from
->cpt_si_pid
;
1031 to
->si_uid
= from
->cpt_si_uid
;
1032 to
->si_status
= from
->cpt_si_status
;
1033 to
->si_utime
= from
->cpt_si_utime
;
1034 to
->si_stime
= from
->cpt_si_stime
;
1040 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1043 to
->si_band
= from
->cpt_si_band
;
1044 to
->si_fd
= from
->cpt_si_fd
;
1047 to
->si_pid
= from
->cpt_si_pid
;
1048 to
->si_uid
= from
->cpt_si_uid
;
1049 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1056 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1059 memset (to
, 0, sizeof (*to
));
1061 to
->si_signo
= from
->si_signo
;
1062 to
->si_errno
= from
->si_errno
;
1063 to
->si_code
= from
->si_code
;
1065 if (to
->si_code
== SI_TIMER
)
1067 to
->cpt_si_timerid
= from
->si_timerid
;
1068 to
->cpt_si_overrun
= from
->si_overrun
;
1069 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1071 else if (to
->si_code
== SI_USER
)
1073 to
->cpt_si_pid
= from
->si_pid
;
1074 to
->cpt_si_uid
= from
->si_uid
;
1076 else if (to
->si_code
< 0)
1078 to
->cpt_si_pid
= from
->si_pid
;
1079 to
->cpt_si_uid
= from
->si_uid
;
1080 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1084 switch (to
->si_signo
)
1087 to
->cpt_si_pid
= from
->si_pid
;
1088 to
->cpt_si_uid
= from
->si_uid
;
1089 to
->cpt_si_status
= from
->si_status
;
1090 to
->cpt_si_utime
= from
->si_utime
;
1091 to
->cpt_si_stime
= from
->si_stime
;
1097 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1100 to
->cpt_si_band
= from
->si_band
;
1101 to
->cpt_si_fd
= from
->si_fd
;
1104 to
->cpt_si_pid
= from
->si_pid
;
1105 to
->cpt_si_uid
= from
->si_uid
;
1106 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1113 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1114 compat_x32_siginfo_t
*from
)
1116 memset (to
, 0, sizeof (*to
));
1118 to
->si_signo
= from
->si_signo
;
1119 to
->si_errno
= from
->si_errno
;
1120 to
->si_code
= from
->si_code
;
1122 if (to
->si_code
== SI_TIMER
)
1124 to
->si_timerid
= from
->cpt_si_timerid
;
1125 to
->si_overrun
= from
->cpt_si_overrun
;
1126 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1128 else if (to
->si_code
== SI_USER
)
1130 to
->si_pid
= from
->cpt_si_pid
;
1131 to
->si_uid
= from
->cpt_si_uid
;
1133 else if (to
->si_code
< 0)
1135 to
->si_pid
= from
->cpt_si_pid
;
1136 to
->si_uid
= from
->cpt_si_uid
;
1137 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1141 switch (to
->si_signo
)
1144 to
->si_pid
= from
->cpt_si_pid
;
1145 to
->si_uid
= from
->cpt_si_uid
;
1146 to
->si_status
= from
->cpt_si_status
;
1147 to
->si_utime
= from
->cpt_si_utime
;
1148 to
->si_stime
= from
->cpt_si_stime
;
1154 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1157 to
->si_band
= from
->cpt_si_band
;
1158 to
->si_fd
= from
->cpt_si_fd
;
1161 to
->si_pid
= from
->cpt_si_pid
;
1162 to
->si_uid
= from
->cpt_si_uid
;
1163 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1169 #endif /* __x86_64__ */
1171 /* Convert a native/host siginfo object, into/from the siginfo in the
1172 layout of the inferiors' architecture. Returns true if any
1173 conversion was done; false otherwise. If DIRECTION is 1, then copy
1174 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1178 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1181 unsigned int machine
;
1182 int tid
= lwpid_of (current_inferior
);
1183 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1185 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1186 if (!is_64bit_tdesc ())
1188 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1189 fatal ("unexpected difference in siginfo");
1192 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1194 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1198 /* No fixup for native x32 GDB. */
1199 else if (!is_elf64
&& sizeof (void *) == 8)
1201 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1202 fatal ("unexpected difference in siginfo");
1205 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1208 siginfo_from_compat_x32_siginfo (native
,
1209 (struct compat_x32_siginfo
*) inf
);
1220 /* Format of XSAVE extended state is:
1223 fxsave_bytes[0..463]
1224 sw_usable_bytes[464..511]
1225 xstate_hdr_bytes[512..575]
1230 Same memory layout will be used for the coredump NT_X86_XSTATE
1231 representing the XSAVE extended state registers.
1233 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1234 extended state mask, which is the same as the extended control register
1235 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1236 together with the mask saved in the xstate_hdr_bytes to determine what
1237 states the processor/OS supports and what state, used or initialized,
1238 the process/thread is in. */
1239 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet"; the #else/#endif were dropped by the
   mangled extraction and are restored here.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1;
#else
  0;
#endif

/* Does the current host support PTRACE_GETREGSET?  -1 = not probed.  */
static int have_ptrace_getregset = -1;
1255 /* Get Linux/x86 target description from running target. */
1257 static const struct target_desc
*
1258 x86_linux_read_description (void)
1260 unsigned int machine
;
1264 static uint64_t xcr0
;
1265 struct regset_info
*regset
;
1267 tid
= lwpid_of (current_inferior
);
1269 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1271 if (sizeof (void *) == 4)
1274 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1276 else if (machine
== EM_X86_64
)
1277 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1281 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1282 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1284 elf_fpxregset_t fpxregs
;
1286 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1288 have_ptrace_getfpxregs
= 0;
1289 have_ptrace_getregset
= 0;
1290 return tdesc_i386_mmx_linux
;
1293 have_ptrace_getfpxregs
= 1;
1299 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1301 /* Don't use XML. */
1303 if (machine
== EM_X86_64
)
1304 return tdesc_amd64_linux_no_xml
;
1307 return tdesc_i386_linux_no_xml
;
1310 if (have_ptrace_getregset
== -1)
1312 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1315 iov
.iov_base
= xstateregs
;
1316 iov
.iov_len
= sizeof (xstateregs
);
1318 /* Check if PTRACE_GETREGSET works. */
1319 if (ptrace (PTRACE_GETREGSET
, tid
,
1320 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1321 have_ptrace_getregset
= 0;
1324 have_ptrace_getregset
= 1;
1326 /* Get XCR0 from XSAVE extended state. */
1327 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1328 / sizeof (uint64_t))];
1330 /* Use PTRACE_GETREGSET if it is available. */
1331 for (regset
= x86_regsets
;
1332 regset
->fill_function
!= NULL
; regset
++)
1333 if (regset
->get_request
== PTRACE_GETREGSET
)
1334 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1335 else if (regset
->type
!= GENERAL_REGS
)
1340 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1341 xcr0_features
= (have_ptrace_getregset
1342 && (xcr0
& I386_XSTATE_ALL_MASK
));
1347 if (machine
== EM_X86_64
)
1354 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1356 case I386_XSTATE_MPX_MASK
:
1357 return tdesc_amd64_mpx_linux
;
1359 case I386_XSTATE_AVX_MASK
:
1360 return tdesc_amd64_avx_linux
;
1363 return tdesc_amd64_linux
;
1367 return tdesc_amd64_linux
;
1373 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1375 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1376 case I386_XSTATE_AVX_MASK
:
1377 return tdesc_x32_avx_linux
;
1380 return tdesc_x32_linux
;
1384 return tdesc_x32_linux
;
1392 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1394 case (I386_XSTATE_MPX_MASK
):
1395 return tdesc_i386_mpx_linux
;
1397 case (I386_XSTATE_AVX_MASK
):
1398 return tdesc_i386_avx_linux
;
1401 return tdesc_i386_linux
;
1405 return tdesc_i386_linux
;
1408 gdb_assert_not_reached ("failed to return tdesc");
1411 /* Callback for find_inferior. Stops iteration when a thread with a
1412 given PID is found. */
1415 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1417 int pid
= *(int *) data
;
1419 return (ptid_get_pid (entry
->id
) == pid
);
1422 /* Callback for for_each_inferior. Calls the arch_setup routine for
1426 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1428 int pid
= ptid_get_pid (entry
->id
);
1430 /* Look up any thread of this processes. */
1432 = (struct thread_info
*) find_inferior (&all_threads
,
1433 same_process_callback
, &pid
);
1435 the_low_target
.arch_setup ();
1438 /* Update all the target description of all processes; a new GDB
1439 connected, and it may or not support xml target descriptions. */
1442 x86_linux_update_xmltarget (void)
1444 struct thread_info
*save_inferior
= current_inferior
;
1446 /* Before changing the register cache's internal layout, flush the
1447 contents of the current valid caches back to the threads, and
1448 release the current regcache objects. */
1449 regcache_release ();
1451 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1453 current_inferior
= save_inferior
;
1456 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1457 PTRACE_GETREGSET. */
1460 x86_linux_process_qsupported (const char *query
)
1462 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1463 with "i386" in qSupported query, it supports x86 XML target
1466 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1468 char *copy
= xstrdup (query
+ 13);
1471 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1473 if (strcmp (p
, "i386") == 0)
1483 x86_linux_update_xmltarget ();
1486 /* Common for x86/x86-64. */
1488 static struct regsets_info x86_regsets_info
=
1490 x86_regsets
, /* regsets */
1491 0, /* num_regsets */
1492 NULL
, /* disabled_regsets */
1496 static struct regs_info amd64_linux_regs_info
=
1498 NULL
, /* regset_bitmap */
1499 NULL
, /* usrregs_info */
1503 static struct usrregs_info i386_linux_usrregs_info
=
1509 static struct regs_info i386_linux_regs_info
=
1511 NULL
, /* regset_bitmap */
1512 &i386_linux_usrregs_info
,
1516 const struct regs_info
*
1517 x86_linux_regs_info (void)
1520 if (is_64bit_tdesc ())
1521 return &amd64_linux_regs_info
;
1524 return &i386_linux_regs_info
;
1527 /* Initialize the target description for the architecture of the
1531 x86_arch_setup (void)
1533 current_process ()->tdesc
= x86_linux_read_description ();
1537 x86_supports_tracepoints (void)
1543 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1545 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hex byte values (e.g.
   "48 83 ec 18"), into BUF.  Returns the number of bytes written.
   The loop structure was dropped by the mangled extraction and is
   restored here: strtoul consumes one byte per iteration and leaves
   ENDPTR at the first unparsed character; no progress means done.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1571 /* Build a jump pad that saves registers and calls a collection
1572 function. Writes a jump instruction to the jump pad to
1573 JJUMPAD_INSN. The caller is responsible to write it in at the
1574 tracepoint address. */
1577 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1578 CORE_ADDR collector
,
1581 CORE_ADDR
*jump_entry
,
1582 CORE_ADDR
*trampoline
,
1583 ULONGEST
*trampoline_size
,
1584 unsigned char *jjump_pad_insn
,
1585 ULONGEST
*jjump_pad_insn_size
,
1586 CORE_ADDR
*adjusted_insn_addr
,
1587 CORE_ADDR
*adjusted_insn_addr_end
,
1590 unsigned char buf
[40];
1594 CORE_ADDR buildaddr
= *jump_entry
;
1596 /* Build the jump pad. */
1598 /* First, do tracepoint data collection. Save registers. */
1600 /* Need to ensure stack pointer saved first. */
1601 buf
[i
++] = 0x54; /* push %rsp */
1602 buf
[i
++] = 0x55; /* push %rbp */
1603 buf
[i
++] = 0x57; /* push %rdi */
1604 buf
[i
++] = 0x56; /* push %rsi */
1605 buf
[i
++] = 0x52; /* push %rdx */
1606 buf
[i
++] = 0x51; /* push %rcx */
1607 buf
[i
++] = 0x53; /* push %rbx */
1608 buf
[i
++] = 0x50; /* push %rax */
1609 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1610 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1611 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1612 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1613 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1614 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1615 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1616 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1617 buf
[i
++] = 0x9c; /* pushfq */
1618 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1620 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1621 i
+= sizeof (unsigned long);
1622 buf
[i
++] = 0x57; /* push %rdi */
1623 append_insns (&buildaddr
, i
, buf
);
1625 /* Stack space for the collecting_t object. */
1627 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1628 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1629 memcpy (buf
+ i
, &tpoint
, 8);
1631 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1632 i
+= push_opcode (&buf
[i
],
1633 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1634 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1635 append_insns (&buildaddr
, i
, buf
);
1639 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1640 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1642 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1643 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1644 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1645 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1646 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1647 append_insns (&buildaddr
, i
, buf
);
1649 /* Set up the gdb_collect call. */
1650 /* At this point, (stack pointer + 0x18) is the base of our saved
1654 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1655 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1657 /* tpoint address may be 64-bit wide. */
1658 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1659 memcpy (buf
+ i
, &tpoint
, 8);
1661 append_insns (&buildaddr
, i
, buf
);
1663 /* The collector function being in the shared library, may be
1664 >31-bits away off the jump pad. */
1666 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1667 memcpy (buf
+ i
, &collector
, 8);
1669 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1670 append_insns (&buildaddr
, i
, buf
);
1672 /* Clear the spin-lock. */
1674 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1675 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1676 memcpy (buf
+ i
, &lockaddr
, 8);
1678 append_insns (&buildaddr
, i
, buf
);
1680 /* Remove stack that had been used for the collect_t object. */
1682 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1683 append_insns (&buildaddr
, i
, buf
);
1685 /* Restore register state. */
1687 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1691 buf
[i
++] = 0x9d; /* popfq */
1692 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1693 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1694 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1695 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1696 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1697 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1698 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1699 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1700 buf
[i
++] = 0x58; /* pop %rax */
1701 buf
[i
++] = 0x5b; /* pop %rbx */
1702 buf
[i
++] = 0x59; /* pop %rcx */
1703 buf
[i
++] = 0x5a; /* pop %rdx */
1704 buf
[i
++] = 0x5e; /* pop %rsi */
1705 buf
[i
++] = 0x5f; /* pop %rdi */
1706 buf
[i
++] = 0x5d; /* pop %rbp */
1707 buf
[i
++] = 0x5c; /* pop %rsp */
1708 append_insns (&buildaddr
, i
, buf
);
1710 /* Now, adjust the original instruction to execute in the jump
1712 *adjusted_insn_addr
= buildaddr
;
1713 relocate_instruction (&buildaddr
, tpaddr
);
1714 *adjusted_insn_addr_end
= buildaddr
;
1716 /* Finally, write a jump back to the program. */
1718 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1719 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1722 "E.Jump back from jump pad too far from tracepoint "
1723 "(offset 0x%" PRIx64
" > int32).", loffset
);
1727 offset
= (int) loffset
;
1728 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1729 memcpy (buf
+ 1, &offset
, 4);
1730 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1732 /* The jump pad is now built. Wire in a jump to our jump pad. This
1733 is always done last (by our caller actually), so that we can
1734 install fast tracepoints with threads running. This relies on
1735 the agent's atomic write support. */
1736 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1737 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1740 "E.Jump pad too far from tracepoint "
1741 "(offset 0x%" PRIx64
" > int32).", loffset
);
1745 offset
= (int) loffset
;
1747 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1748 memcpy (buf
+ 1, &offset
, 4);
1749 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1750 *jjump_pad_insn_size
= sizeof (jump_insn
);
1752 /* Return the end address of our pad. */
1753 *jump_entry
= buildaddr
;
1758 #endif /* __x86_64__ */
1760 /* Build a jump pad that saves registers and calls a collection
1761 function. Writes a jump instruction to the jump pad to
1762 JJUMPAD_INSN. The caller is responsible to write it in at the
1763 tracepoint address. */
1766 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1767 CORE_ADDR collector
,
1770 CORE_ADDR
*jump_entry
,
1771 CORE_ADDR
*trampoline
,
1772 ULONGEST
*trampoline_size
,
1773 unsigned char *jjump_pad_insn
,
1774 ULONGEST
*jjump_pad_insn_size
,
1775 CORE_ADDR
*adjusted_insn_addr
,
1776 CORE_ADDR
*adjusted_insn_addr_end
,
1779 unsigned char buf
[0x100];
1781 CORE_ADDR buildaddr
= *jump_entry
;
1783 /* Build the jump pad. */
1785 /* First, do tracepoint data collection. Save registers. */
1787 buf
[i
++] = 0x60; /* pushad */
1788 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1789 *((int *)(buf
+ i
)) = (int) tpaddr
;
1791 buf
[i
++] = 0x9c; /* pushf */
1792 buf
[i
++] = 0x1e; /* push %ds */
1793 buf
[i
++] = 0x06; /* push %es */
1794 buf
[i
++] = 0x0f; /* push %fs */
1796 buf
[i
++] = 0x0f; /* push %gs */
1798 buf
[i
++] = 0x16; /* push %ss */
1799 buf
[i
++] = 0x0e; /* push %cs */
1800 append_insns (&buildaddr
, i
, buf
);
1802 /* Stack space for the collecting_t object. */
1804 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1806 /* Build the object. */
1807 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1808 memcpy (buf
+ i
, &tpoint
, 4);
1810 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1812 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1813 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1814 append_insns (&buildaddr
, i
, buf
);
1816 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1817 If we cared for it, this could be using xchg alternatively. */
1820 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1821 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1823 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1825 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1826 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1827 append_insns (&buildaddr
, i
, buf
);
1830 /* Set up arguments to the gdb_collect call. */
1832 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1833 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1834 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1835 append_insns (&buildaddr
, i
, buf
);
1838 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1839 append_insns (&buildaddr
, i
, buf
);
1842 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1843 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1845 append_insns (&buildaddr
, i
, buf
);
1847 buf
[0] = 0xe8; /* call <reladdr> */
1848 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1849 memcpy (buf
+ 1, &offset
, 4);
1850 append_insns (&buildaddr
, 5, buf
);
1851 /* Clean up after the call. */
1852 buf
[0] = 0x83; /* add $0x8,%esp */
1855 append_insns (&buildaddr
, 3, buf
);
1858 /* Clear the spin-lock. This would need the LOCK prefix on older
1861 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1862 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1863 memcpy (buf
+ i
, &lockaddr
, 4);
1865 append_insns (&buildaddr
, i
, buf
);
1868 /* Remove stack that had been used for the collect_t object. */
1870 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1871 append_insns (&buildaddr
, i
, buf
);
1874 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1877 buf
[i
++] = 0x17; /* pop %ss */
1878 buf
[i
++] = 0x0f; /* pop %gs */
1880 buf
[i
++] = 0x0f; /* pop %fs */
1882 buf
[i
++] = 0x07; /* pop %es */
1883 buf
[i
++] = 0x1f; /* pop %ds */
1884 buf
[i
++] = 0x9d; /* popf */
1885 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1888 buf
[i
++] = 0x61; /* popad */
1889 append_insns (&buildaddr
, i
, buf
);
1891 /* Now, adjust the original instruction to execute in the jump
1893 *adjusted_insn_addr
= buildaddr
;
1894 relocate_instruction (&buildaddr
, tpaddr
);
1895 *adjusted_insn_addr_end
= buildaddr
;
1897 /* Write the jump back to the program. */
1898 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1899 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1900 memcpy (buf
+ 1, &offset
, 4);
1901 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1903 /* The jump pad is now built. Wire in a jump to our jump pad. This
1904 is always done last (by our caller actually), so that we can
1905 install fast tracepoints with threads running. This relies on
1906 the agent's atomic write support. */
1909 /* Create a trampoline. */
1910 *trampoline_size
= sizeof (jump_insn
);
1911 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1913 /* No trampoline space available. */
1915 "E.Cannot allocate trampoline space needed for fast "
1916 "tracepoints on 4-byte instructions.");
1920 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1921 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1922 memcpy (buf
+ 1, &offset
, 4);
1923 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1925 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1926 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1927 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1928 memcpy (buf
+ 2, &offset
, 2);
1929 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1930 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1934 /* Else use a 32-bit relative jump instruction. */
1935 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1936 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1937 memcpy (buf
+ 1, &offset
, 4);
1938 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1939 *jjump_pad_insn_size
= sizeof (jump_insn
);
1942 /* Return the end address of our pad. */
1943 *jump_entry
= buildaddr
;
1949 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1950 CORE_ADDR collector
,
1953 CORE_ADDR
*jump_entry
,
1954 CORE_ADDR
*trampoline
,
1955 ULONGEST
*trampoline_size
,
1956 unsigned char *jjump_pad_insn
,
1957 ULONGEST
*jjump_pad_insn_size
,
1958 CORE_ADDR
*adjusted_insn_addr
,
1959 CORE_ADDR
*adjusted_insn_addr_end
,
1963 if (is_64bit_tdesc ())
1964 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1965 collector
, lockaddr
,
1966 orig_size
, jump_entry
,
1967 trampoline
, trampoline_size
,
1969 jjump_pad_insn_size
,
1971 adjusted_insn_addr_end
,
1975 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1976 collector
, lockaddr
,
1977 orig_size
, jump_entry
,
1978 trampoline
, trampoline_size
,
1980 jjump_pad_insn_size
,
1982 adjusted_insn_addr_end
,
1986 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1990 x86_get_min_fast_tracepoint_insn_len (void)
1992 static int warned_about_fast_tracepoints
= 0;
1995 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1996 used for fast tracepoints. */
1997 if (is_64bit_tdesc ())
2001 if (agent_loaded_p ())
2003 char errbuf
[IPA_BUFSIZ
];
2007 /* On x86, if trampolines are available, then 4-byte jump instructions
2008 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2009 with a 4-byte offset are used instead. */
2010 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2014 /* GDB has no channel to explain to user why a shorter fast
2015 tracepoint is not possible, but at least make GDBserver
2016 mention that something has gone awry. */
2017 if (!warned_about_fast_tracepoints
)
2019 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2020 warned_about_fast_tracepoints
= 1;
2027 /* Indicate that the minimum length is currently unknown since the IPA
2028 has not loaded yet. */
2034 add_insns (unsigned char *start
, int len
)
2036 CORE_ADDR buildaddr
= current_insn_ptr
;
2039 debug_printf ("Adding %d bytes of insn at %s\n",
2040 len
, paddress (buildaddr
));
2042 append_insns (&buildaddr
, len
, start
);
2043 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2087 amd64_emit_prologue (void)
2089 EMIT_ASM (amd64_prologue
,
2091 "movq %rsp,%rbp\n\t"
2092 "sub $0x20,%rsp\n\t"
2093 "movq %rdi,-8(%rbp)\n\t"
2094 "movq %rsi,-16(%rbp)");
2099 amd64_emit_epilogue (void)
2101 EMIT_ASM (amd64_epilogue
,
2102 "movq -16(%rbp),%rdi\n\t"
2103 "movq %rax,(%rdi)\n\t"
2110 amd64_emit_add (void)
2112 EMIT_ASM (amd64_add
,
2113 "add (%rsp),%rax\n\t"
2114 "lea 0x8(%rsp),%rsp");
2118 amd64_emit_sub (void)
2120 EMIT_ASM (amd64_sub
,
2121 "sub %rax,(%rsp)\n\t"
2126 amd64_emit_mul (void)
2132 amd64_emit_lsh (void)
2138 amd64_emit_rsh_signed (void)
2144 amd64_emit_rsh_unsigned (void)
2150 amd64_emit_ext (int arg
)
2155 EMIT_ASM (amd64_ext_8
,
2161 EMIT_ASM (amd64_ext_16
,
2166 EMIT_ASM (amd64_ext_32
,
2175 amd64_emit_log_not (void)
2177 EMIT_ASM (amd64_log_not
,
2178 "test %rax,%rax\n\t"
2184 amd64_emit_bit_and (void)
2186 EMIT_ASM (amd64_and
,
2187 "and (%rsp),%rax\n\t"
2188 "lea 0x8(%rsp),%rsp");
2192 amd64_emit_bit_or (void)
2195 "or (%rsp),%rax\n\t"
2196 "lea 0x8(%rsp),%rsp");
2200 amd64_emit_bit_xor (void)
2202 EMIT_ASM (amd64_xor
,
2203 "xor (%rsp),%rax\n\t"
2204 "lea 0x8(%rsp),%rsp");
2208 amd64_emit_bit_not (void)
2210 EMIT_ASM (amd64_bit_not
,
2211 "xorq $0xffffffffffffffff,%rax");
2215 amd64_emit_equal (void)
2217 EMIT_ASM (amd64_equal
,
2218 "cmp %rax,(%rsp)\n\t"
2219 "je .Lamd64_equal_true\n\t"
2221 "jmp .Lamd64_equal_end\n\t"
2222 ".Lamd64_equal_true:\n\t"
2224 ".Lamd64_equal_end:\n\t"
2225 "lea 0x8(%rsp),%rsp");
2229 amd64_emit_less_signed (void)
2231 EMIT_ASM (amd64_less_signed
,
2232 "cmp %rax,(%rsp)\n\t"
2233 "jl .Lamd64_less_signed_true\n\t"
2235 "jmp .Lamd64_less_signed_end\n\t"
2236 ".Lamd64_less_signed_true:\n\t"
2238 ".Lamd64_less_signed_end:\n\t"
2239 "lea 0x8(%rsp),%rsp");
2243 amd64_emit_less_unsigned (void)
2245 EMIT_ASM (amd64_less_unsigned
,
2246 "cmp %rax,(%rsp)\n\t"
2247 "jb .Lamd64_less_unsigned_true\n\t"
2249 "jmp .Lamd64_less_unsigned_end\n\t"
2250 ".Lamd64_less_unsigned_true:\n\t"
2252 ".Lamd64_less_unsigned_end:\n\t"
2253 "lea 0x8(%rsp),%rsp");
2257 amd64_emit_ref (int size
)
2262 EMIT_ASM (amd64_ref1
,
2266 EMIT_ASM (amd64_ref2
,
2270 EMIT_ASM (amd64_ref4
,
2271 "movl (%rax),%eax");
2274 EMIT_ASM (amd64_ref8
,
2275 "movq (%rax),%rax");
2281 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2283 EMIT_ASM (amd64_if_goto
,
2287 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2295 amd64_emit_goto (int *offset_p
, int *size_p
)
2297 EMIT_ASM (amd64_goto
,
2298 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2306 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2308 int diff
= (to
- (from
+ size
));
2309 unsigned char buf
[sizeof (int)];
2317 memcpy (buf
, &diff
, sizeof (int));
2318 write_inferior_memory (from
, buf
, sizeof (int));
2322 amd64_emit_const (LONGEST num
)
2324 unsigned char buf
[16];
2326 CORE_ADDR buildaddr
= current_insn_ptr
;
2329 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2330 memcpy (&buf
[i
], &num
, sizeof (num
));
2332 append_insns (&buildaddr
, i
, buf
);
2333 current_insn_ptr
= buildaddr
;
2337 amd64_emit_call (CORE_ADDR fn
)
2339 unsigned char buf
[16];
2341 CORE_ADDR buildaddr
;
2344 /* The destination function being in the shared library, may be
2345 >31-bits away off the compiled code pad. */
2347 buildaddr
= current_insn_ptr
;
2349 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2353 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2355 /* Offset is too large for a call. Use callq, but that requires
2356 a register, so avoid it if possible. Use r10, since it is
2357 call-clobbered, we don't have to push/pop it. */
2358 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2360 memcpy (buf
+ i
, &fn
, 8);
2362 buf
[i
++] = 0xff; /* callq *%r10 */
2367 int offset32
= offset64
; /* we know we can't overflow here. */
2368 memcpy (buf
+ i
, &offset32
, 4);
2372 append_insns (&buildaddr
, i
, buf
);
2373 current_insn_ptr
= buildaddr
;
2377 amd64_emit_reg (int reg
)
2379 unsigned char buf
[16];
2381 CORE_ADDR buildaddr
;
2383 /* Assume raw_regs is still in %rdi. */
2384 buildaddr
= current_insn_ptr
;
2386 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2387 memcpy (&buf
[i
], ®
, sizeof (reg
));
2389 append_insns (&buildaddr
, i
, buf
);
2390 current_insn_ptr
= buildaddr
;
2391 amd64_emit_call (get_raw_reg_func_addr ());
2395 amd64_emit_pop (void)
2397 EMIT_ASM (amd64_pop
,
2402 amd64_emit_stack_flush (void)
2404 EMIT_ASM (amd64_stack_flush
,
2409 amd64_emit_zero_ext (int arg
)
2414 EMIT_ASM (amd64_zero_ext_8
,
2418 EMIT_ASM (amd64_zero_ext_16
,
2419 "and $0xffff,%rax");
2422 EMIT_ASM (amd64_zero_ext_32
,
2423 "mov $0xffffffff,%rcx\n\t"
2432 amd64_emit_swap (void)
2434 EMIT_ASM (amd64_swap
,
2441 amd64_emit_stack_adjust (int n
)
2443 unsigned char buf
[16];
2445 CORE_ADDR buildaddr
= current_insn_ptr
;
2448 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2452 /* This only handles adjustments up to 16, but we don't expect any more. */
2454 append_insns (&buildaddr
, i
, buf
);
2455 current_insn_ptr
= buildaddr
;
2458 /* FN's prototype is `LONGEST(*fn)(int)'. */
2461 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2463 unsigned char buf
[16];
2465 CORE_ADDR buildaddr
;
2467 buildaddr
= current_insn_ptr
;
2469 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2470 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2472 append_insns (&buildaddr
, i
, buf
);
2473 current_insn_ptr
= buildaddr
;
2474 amd64_emit_call (fn
);
2477 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2480 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2482 unsigned char buf
[16];
2484 CORE_ADDR buildaddr
;
2486 buildaddr
= current_insn_ptr
;
2488 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2489 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2491 append_insns (&buildaddr
, i
, buf
);
2492 current_insn_ptr
= buildaddr
;
2493 EMIT_ASM (amd64_void_call_2_a
,
2494 /* Save away a copy of the stack top. */
2496 /* Also pass top as the second argument. */
2498 amd64_emit_call (fn
);
2499 EMIT_ASM (amd64_void_call_2_b
,
2500 /* Restore the stack top, %rax may have been trashed. */
2505 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2508 "cmp %rax,(%rsp)\n\t"
2509 "jne .Lamd64_eq_fallthru\n\t"
2510 "lea 0x8(%rsp),%rsp\n\t"
2512 /* jmp, but don't trust the assembler to choose the right jump */
2513 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2514 ".Lamd64_eq_fallthru:\n\t"
2515 "lea 0x8(%rsp),%rsp\n\t"
2525 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2528 "cmp %rax,(%rsp)\n\t"
2529 "je .Lamd64_ne_fallthru\n\t"
2530 "lea 0x8(%rsp),%rsp\n\t"
2532 /* jmp, but don't trust the assembler to choose the right jump */
2533 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2534 ".Lamd64_ne_fallthru:\n\t"
2535 "lea 0x8(%rsp),%rsp\n\t"
2545 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2548 "cmp %rax,(%rsp)\n\t"
2549 "jnl .Lamd64_lt_fallthru\n\t"
2550 "lea 0x8(%rsp),%rsp\n\t"
2552 /* jmp, but don't trust the assembler to choose the right jump */
2553 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2554 ".Lamd64_lt_fallthru:\n\t"
2555 "lea 0x8(%rsp),%rsp\n\t"
2565 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2568 "cmp %rax,(%rsp)\n\t"
2569 "jnle .Lamd64_le_fallthru\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2572 /* jmp, but don't trust the assembler to choose the right jump */
2573 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2574 ".Lamd64_le_fallthru:\n\t"
2575 "lea 0x8(%rsp),%rsp\n\t"
2585 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2588 "cmp %rax,(%rsp)\n\t"
2589 "jng .Lamd64_gt_fallthru\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2592 /* jmp, but don't trust the assembler to choose the right jump */
2593 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2594 ".Lamd64_gt_fallthru:\n\t"
2595 "lea 0x8(%rsp),%rsp\n\t"
2605 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2608 "cmp %rax,(%rsp)\n\t"
2609 "jnge .Lamd64_ge_fallthru\n\t"
2610 ".Lamd64_ge_jump:\n\t"
2611 "lea 0x8(%rsp),%rsp\n\t"
2613 /* jmp, but don't trust the assembler to choose the right jump */
2614 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2615 ".Lamd64_ge_fallthru:\n\t"
2616 "lea 0x8(%rsp),%rsp\n\t"
2625 struct emit_ops amd64_emit_ops
=
2627 amd64_emit_prologue
,
2628 amd64_emit_epilogue
,
2633 amd64_emit_rsh_signed
,
2634 amd64_emit_rsh_unsigned
,
2642 amd64_emit_less_signed
,
2643 amd64_emit_less_unsigned
,
2647 amd64_write_goto_address
,
2652 amd64_emit_stack_flush
,
2653 amd64_emit_zero_ext
,
2655 amd64_emit_stack_adjust
,
2656 amd64_emit_int_call_1
,
2657 amd64_emit_void_call_2
,
2666 #endif /* __x86_64__ */
2669 i386_emit_prologue (void)
2671 EMIT_ASM32 (i386_prologue
,
2675 /* At this point, the raw regs base address is at 8(%ebp), and the
2676 value pointer is at 12(%ebp). */
2680 i386_emit_epilogue (void)
2682 EMIT_ASM32 (i386_epilogue
,
2683 "mov 12(%ebp),%ecx\n\t"
2684 "mov %eax,(%ecx)\n\t"
2685 "mov %ebx,0x4(%ecx)\n\t"
2693 i386_emit_add (void)
2695 EMIT_ASM32 (i386_add
,
2696 "add (%esp),%eax\n\t"
2697 "adc 0x4(%esp),%ebx\n\t"
2698 "lea 0x8(%esp),%esp");
2702 i386_emit_sub (void)
2704 EMIT_ASM32 (i386_sub
,
2705 "subl %eax,(%esp)\n\t"
2706 "sbbl %ebx,4(%esp)\n\t"
2712 i386_emit_mul (void)
2718 i386_emit_lsh (void)
2724 i386_emit_rsh_signed (void)
2730 i386_emit_rsh_unsigned (void)
2736 i386_emit_ext (int arg
)
2741 EMIT_ASM32 (i386_ext_8
,
2744 "movl %eax,%ebx\n\t"
2748 EMIT_ASM32 (i386_ext_16
,
2750 "movl %eax,%ebx\n\t"
2754 EMIT_ASM32 (i386_ext_32
,
2755 "movl %eax,%ebx\n\t"
2764 i386_emit_log_not (void)
2766 EMIT_ASM32 (i386_log_not
,
2768 "test %eax,%eax\n\t"
2775 i386_emit_bit_and (void)
2777 EMIT_ASM32 (i386_and
,
2778 "and (%esp),%eax\n\t"
2779 "and 0x4(%esp),%ebx\n\t"
2780 "lea 0x8(%esp),%esp");
2784 i386_emit_bit_or (void)
2786 EMIT_ASM32 (i386_or
,
2787 "or (%esp),%eax\n\t"
2788 "or 0x4(%esp),%ebx\n\t"
2789 "lea 0x8(%esp),%esp");
2793 i386_emit_bit_xor (void)
2795 EMIT_ASM32 (i386_xor
,
2796 "xor (%esp),%eax\n\t"
2797 "xor 0x4(%esp),%ebx\n\t"
2798 "lea 0x8(%esp),%esp");
2802 i386_emit_bit_not (void)
2804 EMIT_ASM32 (i386_bit_not
,
2805 "xor $0xffffffff,%eax\n\t"
2806 "xor $0xffffffff,%ebx\n\t");
2810 i386_emit_equal (void)
2812 EMIT_ASM32 (i386_equal
,
2813 "cmpl %ebx,4(%esp)\n\t"
2814 "jne .Li386_equal_false\n\t"
2815 "cmpl %eax,(%esp)\n\t"
2816 "je .Li386_equal_true\n\t"
2817 ".Li386_equal_false:\n\t"
2819 "jmp .Li386_equal_end\n\t"
2820 ".Li386_equal_true:\n\t"
2822 ".Li386_equal_end:\n\t"
2824 "lea 0x8(%esp),%esp");
2828 i386_emit_less_signed (void)
2830 EMIT_ASM32 (i386_less_signed
,
2831 "cmpl %ebx,4(%esp)\n\t"
2832 "jl .Li386_less_signed_true\n\t"
2833 "jne .Li386_less_signed_false\n\t"
2834 "cmpl %eax,(%esp)\n\t"
2835 "jl .Li386_less_signed_true\n\t"
2836 ".Li386_less_signed_false:\n\t"
2838 "jmp .Li386_less_signed_end\n\t"
2839 ".Li386_less_signed_true:\n\t"
2841 ".Li386_less_signed_end:\n\t"
2843 "lea 0x8(%esp),%esp");
2847 i386_emit_less_unsigned (void)
2849 EMIT_ASM32 (i386_less_unsigned
,
2850 "cmpl %ebx,4(%esp)\n\t"
2851 "jb .Li386_less_unsigned_true\n\t"
2852 "jne .Li386_less_unsigned_false\n\t"
2853 "cmpl %eax,(%esp)\n\t"
2854 "jb .Li386_less_unsigned_true\n\t"
2855 ".Li386_less_unsigned_false:\n\t"
2857 "jmp .Li386_less_unsigned_end\n\t"
2858 ".Li386_less_unsigned_true:\n\t"
2860 ".Li386_less_unsigned_end:\n\t"
2862 "lea 0x8(%esp),%esp");
2866 i386_emit_ref (int size
)
2871 EMIT_ASM32 (i386_ref1
,
2875 EMIT_ASM32 (i386_ref2
,
2879 EMIT_ASM32 (i386_ref4
,
2880 "movl (%eax),%eax");
2883 EMIT_ASM32 (i386_ref8
,
2884 "movl 4(%eax),%ebx\n\t"
2885 "movl (%eax),%eax");
2891 i386_emit_if_goto (int *offset_p
, int *size_p
)
2893 EMIT_ASM32 (i386_if_goto
,
2899 /* Don't trust the assembler to choose the right jump */
2900 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2903 *offset_p
= 11; /* be sure that this matches the sequence above */
2909 i386_emit_goto (int *offset_p
, int *size_p
)
2911 EMIT_ASM32 (i386_goto
,
2912 /* Don't trust the assembler to choose the right jump */
2913 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2921 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2923 int diff
= (to
- (from
+ size
));
2924 unsigned char buf
[sizeof (int)];
2926 /* We're only doing 4-byte sizes at the moment. */
2933 memcpy (buf
, &diff
, sizeof (int));
2934 write_inferior_memory (from
, buf
, sizeof (int));
2938 i386_emit_const (LONGEST num
)
2940 unsigned char buf
[16];
2942 CORE_ADDR buildaddr
= current_insn_ptr
;
2945 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2946 lo
= num
& 0xffffffff;
2947 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2949 hi
= ((num
>> 32) & 0xffffffff);
2952 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2953 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2958 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2960 append_insns (&buildaddr
, i
, buf
);
2961 current_insn_ptr
= buildaddr
;
2965 i386_emit_call (CORE_ADDR fn
)
2967 unsigned char buf
[16];
2969 CORE_ADDR buildaddr
;
2971 buildaddr
= current_insn_ptr
;
2973 buf
[i
++] = 0xe8; /* call <reladdr> */
2974 offset
= ((int) fn
) - (buildaddr
+ 5);
2975 memcpy (buf
+ 1, &offset
, 4);
2976 append_insns (&buildaddr
, 5, buf
);
2977 current_insn_ptr
= buildaddr
;
2981 i386_emit_reg (int reg
)
2983 unsigned char buf
[16];
2985 CORE_ADDR buildaddr
;
2987 EMIT_ASM32 (i386_reg_a
,
2989 buildaddr
= current_insn_ptr
;
2991 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2992 memcpy (&buf
[i
], ®
, sizeof (reg
));
2994 append_insns (&buildaddr
, i
, buf
);
2995 current_insn_ptr
= buildaddr
;
2996 EMIT_ASM32 (i386_reg_b
,
2997 "mov %eax,4(%esp)\n\t"
2998 "mov 8(%ebp),%eax\n\t"
3000 i386_emit_call (get_raw_reg_func_addr ());
3001 EMIT_ASM32 (i386_reg_c
,
3003 "lea 0x8(%esp),%esp");
3007 i386_emit_pop (void)
3009 EMIT_ASM32 (i386_pop
,
3015 i386_emit_stack_flush (void)
3017 EMIT_ASM32 (i386_stack_flush
,
3023 i386_emit_zero_ext (int arg
)
3028 EMIT_ASM32 (i386_zero_ext_8
,
3029 "and $0xff,%eax\n\t"
3033 EMIT_ASM32 (i386_zero_ext_16
,
3034 "and $0xffff,%eax\n\t"
3038 EMIT_ASM32 (i386_zero_ext_32
,
3047 i386_emit_swap (void)
3049 EMIT_ASM32 (i386_swap
,
3059 i386_emit_stack_adjust (int n
)
3061 unsigned char buf
[16];
3063 CORE_ADDR buildaddr
= current_insn_ptr
;
3066 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3070 append_insns (&buildaddr
, i
, buf
);
3071 current_insn_ptr
= buildaddr
;
3074 /* FN's prototype is `LONGEST(*fn)(int)'. */
3077 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3079 unsigned char buf
[16];
3081 CORE_ADDR buildaddr
;
3083 EMIT_ASM32 (i386_int_call_1_a
,
3084 /* Reserve a bit of stack space. */
3086 /* Put the one argument on the stack. */
3087 buildaddr
= current_insn_ptr
;
3089 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3092 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3094 append_insns (&buildaddr
, i
, buf
);
3095 current_insn_ptr
= buildaddr
;
3096 i386_emit_call (fn
);
3097 EMIT_ASM32 (i386_int_call_1_c
,
3099 "lea 0x8(%esp),%esp");
3102 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3105 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3107 unsigned char buf
[16];
3109 CORE_ADDR buildaddr
;
3111 EMIT_ASM32 (i386_void_call_2_a
,
3112 /* Preserve %eax only; we don't have to worry about %ebx. */
3114 /* Reserve a bit of stack space for arguments. */
3115 "sub $0x10,%esp\n\t"
3116 /* Copy "top" to the second argument position. (Note that
3117 we can't assume function won't scribble on its
3118 arguments, so don't try to restore from this.) */
3119 "mov %eax,4(%esp)\n\t"
3120 "mov %ebx,8(%esp)");
3121 /* Put the first argument on the stack. */
3122 buildaddr
= current_insn_ptr
;
3124 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3127 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3129 append_insns (&buildaddr
, i
, buf
);
3130 current_insn_ptr
= buildaddr
;
3131 i386_emit_call (fn
);
3132 EMIT_ASM32 (i386_void_call_2_b
,
3133 "lea 0x10(%esp),%esp\n\t"
3134 /* Restore original stack top. */
3140 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3143 /* Check low half first, more likely to be decider */
3144 "cmpl %eax,(%esp)\n\t"
3145 "jne .Leq_fallthru\n\t"
3146 "cmpl %ebx,4(%esp)\n\t"
3147 "jne .Leq_fallthru\n\t"
3148 "lea 0x8(%esp),%esp\n\t"
3151 /* jmp, but don't trust the assembler to choose the right jump */
3152 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3153 ".Leq_fallthru:\n\t"
3154 "lea 0x8(%esp),%esp\n\t"
3165 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3168 /* Check low half first, more likely to be decider */
3169 "cmpl %eax,(%esp)\n\t"
3171 "cmpl %ebx,4(%esp)\n\t"
3172 "je .Lne_fallthru\n\t"
3174 "lea 0x8(%esp),%esp\n\t"
3177 /* jmp, but don't trust the assembler to choose the right jump */
3178 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3179 ".Lne_fallthru:\n\t"
3180 "lea 0x8(%esp),%esp\n\t"
3191 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3194 "cmpl %ebx,4(%esp)\n\t"
3196 "jne .Llt_fallthru\n\t"
3197 "cmpl %eax,(%esp)\n\t"
3198 "jnl .Llt_fallthru\n\t"
3200 "lea 0x8(%esp),%esp\n\t"
3203 /* jmp, but don't trust the assembler to choose the right jump */
3204 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3205 ".Llt_fallthru:\n\t"
3206 "lea 0x8(%esp),%esp\n\t"
3217 i386_emit_le_goto (int *offset_p
, int *size_p
)
3220 "cmpl %ebx,4(%esp)\n\t"
3222 "jne .Lle_fallthru\n\t"
3223 "cmpl %eax,(%esp)\n\t"
3224 "jnle .Lle_fallthru\n\t"
3226 "lea 0x8(%esp),%esp\n\t"
3229 /* jmp, but don't trust the assembler to choose the right jump */
3230 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3231 ".Lle_fallthru:\n\t"
3232 "lea 0x8(%esp),%esp\n\t"
3243 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3246 "cmpl %ebx,4(%esp)\n\t"
3248 "jne .Lgt_fallthru\n\t"
3249 "cmpl %eax,(%esp)\n\t"
3250 "jng .Lgt_fallthru\n\t"
3252 "lea 0x8(%esp),%esp\n\t"
3255 /* jmp, but don't trust the assembler to choose the right jump */
3256 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3257 ".Lgt_fallthru:\n\t"
3258 "lea 0x8(%esp),%esp\n\t"
3269 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3272 "cmpl %ebx,4(%esp)\n\t"
3274 "jne .Lge_fallthru\n\t"
3275 "cmpl %eax,(%esp)\n\t"
3276 "jnge .Lge_fallthru\n\t"
3278 "lea 0x8(%esp),%esp\n\t"
3281 /* jmp, but don't trust the assembler to choose the right jump */
3282 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3283 ".Lge_fallthru:\n\t"
3284 "lea 0x8(%esp),%esp\n\t"
3294 struct emit_ops i386_emit_ops
=
3302 i386_emit_rsh_signed
,
3303 i386_emit_rsh_unsigned
,
3311 i386_emit_less_signed
,
3312 i386_emit_less_unsigned
,
3316 i386_write_goto_address
,
3321 i386_emit_stack_flush
,
3324 i386_emit_stack_adjust
,
3325 i386_emit_int_call_1
,
3326 i386_emit_void_call_2
,
3336 static struct emit_ops
*
3340 if (is_64bit_tdesc ())
3341 return &amd64_emit_ops
;
3344 return &i386_emit_ops
;
/* The "supports_range_stepping" linux_target_ops method: x86
   gdbserver always supports the vCont;r range-stepping packet.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3353 /* This is initialized assuming an amd64 target.
3354 x86_arch_setup will correct it for i386 or amd64 targets. */
3356 struct linux_target_ops the_low_target
=
3359 x86_linux_regs_info
,
3360 x86_cannot_fetch_register
,
3361 x86_cannot_store_register
,
3362 NULL
, /* fetch_register */
3372 x86_stopped_by_watchpoint
,
3373 x86_stopped_data_address
,
3374 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3375 native i386 case (no registers smaller than an xfer unit), and are not
3376 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3379 /* need to fix up i386 siginfo if host is amd64 */
3381 x86_linux_new_process
,
3382 x86_linux_new_thread
,
3383 x86_linux_prepare_to_resume
,
3384 x86_linux_process_qsupported
,
3385 x86_supports_tracepoints
,
3386 x86_get_thread_area
,
3387 x86_install_fast_tracepoint_jump_pad
,
3389 x86_get_min_fast_tracepoint_insn_len
,
3390 x86_supports_range_stepping
,
3394 initialize_low_arch (void)
3396 /* Initialize the Linux target descriptions. */
3398 init_registers_amd64_linux ();
3399 init_registers_amd64_avx_linux ();
3400 init_registers_amd64_mpx_linux ();
3402 init_registers_x32_linux ();
3403 init_registers_x32_avx_linux ();
3405 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3406 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3407 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3409 init_registers_i386_linux ();
3410 init_registers_i386_mmx_linux ();
3411 init_registers_i386_avx_linux ();
3412 init_registers_i386_mpx_linux ();
3414 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3415 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3416 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3418 initialize_regsets_info (&x86_regsets_info
);