1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-avx512-linux.c. */
52 void init_registers_amd64_avx512_linux (void);
53 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
55 /* Defined in auto-generated file amd64-mpx-linux.c. */
56 void init_registers_amd64_mpx_linux (void);
57 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
59 /* Defined in auto-generated file x32-linux.c. */
60 void init_registers_x32_linux (void);
61 extern const struct target_desc
*tdesc_x32_linux
;
63 /* Defined in auto-generated file x32-avx-linux.c. */
64 void init_registers_x32_avx_linux (void);
65 extern const struct target_desc
*tdesc_x32_avx_linux
;
67 /* Defined in auto-generated file x32-avx512-linux.c. */
68 void init_registers_x32_avx512_linux (void);
69 extern const struct target_desc
*tdesc_x32_avx512_linux
;
73 /* Defined in auto-generated file i386-linux.c. */
74 void init_registers_i386_linux (void);
75 extern const struct target_desc
*tdesc_i386_linux
;
77 /* Defined in auto-generated file i386-mmx-linux.c. */
78 void init_registers_i386_mmx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mmx_linux
;
81 /* Defined in auto-generated file i386-avx-linux.c. */
82 void init_registers_i386_avx_linux (void);
83 extern const struct target_desc
*tdesc_i386_avx_linux
;
85 /* Defined in auto-generated file i386-avx512-linux.c. */
86 void init_registers_i386_avx512_linux (void);
87 extern const struct target_desc
*tdesc_i386_avx512_linux
;
89 /* Defined in auto-generated file i386-mpx-linux.c. */
90 void init_registers_i386_mpx_linux (void);
91 extern const struct target_desc
*tdesc_i386_mpx_linux
;
/* Target descriptions used when talking to a GDB without XML support.
   Filled in lazily; see xmltarget_*_no_xml below.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;
/* Templates for the jump instructions written into fast-tracepoint
   jump pads; the zeroed bytes are patched with the relative offset.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
121 #ifndef PTRACE_GETREGSET
122 #define PTRACE_GETREGSET 0x4204
125 #ifndef PTRACE_SETREGSET
126 #define PTRACE_SETREGSET 0x4205
130 #ifndef PTRACE_GET_THREAD_AREA
131 #define PTRACE_GET_THREAD_AREA 25
134 /* This definition comes from prctl.h, but some kernels may not have it. */
135 #ifndef PTRACE_ARCH_PRCTL
136 #define PTRACE_ARCH_PRCTL 30
139 /* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
142 #define ARCH_SET_GS 0x1001
143 #define ARCH_SET_FS 0x1002
144 #define ARCH_GET_FS 0x1003
145 #define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct x86_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
182 static const int x86_64_regmap
[] =
184 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
185 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
186 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
187 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
188 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
189 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1,
196 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
197 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
198 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
201 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1, -1, -1, -1, -1, -1, -1, -1
209 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
210 #define X86_64_USER_REGS (GS + 1)
212 #else /* ! __x86_64__ */
214 /* Mapping between the general-purpose registers in `struct user'
215 format and GDB's register array layout. */
216 static /*const*/ int i386_regmap
[] =
218 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
219 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
220 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
221 DS
* 4, ES
* 4, FS
* 4, GS
* 4
224 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
232 /* Returns true if the current inferior belongs to a x86-64 process,
236 is_64bit_tdesc (void)
238 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
240 return register_size (regcache
->tdesc
, 0) == 8;
246 /* Called by libthread_db. */
249 ps_get_thread_area (const struct ps_prochandle
*ph
,
250 lwpid_t lwpid
, int idx
, void **base
)
253 int use_64bit
= is_64bit_tdesc ();
260 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
264 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
275 unsigned int desc
[4];
277 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
278 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
281 /* Ensure we properly extend the value to 64-bits for x86_64. */
282 *base
= (void *) (uintptr_t) desc
[1];
287 /* Get the thread area address. This is used to recognize which
288 thread is which when tracing with the in-process agent library. We
289 don't read anything from the address, and treat it as opaque; it's
290 the address itself that we assume is unique per-thread. */
293 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
296 int use_64bit
= is_64bit_tdesc ();
301 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
303 *addr
= (CORE_ADDR
) (uintptr_t) base
;
312 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
313 struct thread_info
*thr
= get_lwp_thread (lwp
);
314 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
315 unsigned int desc
[4];
317 const int reg_thread_area
= 3; /* bits to scale down register value. */
320 collect_register_by_name (regcache
, "gs", &gs
);
322 idx
= gs
>> reg_thread_area
;
324 if (ptrace (PTRACE_GET_THREAD_AREA
,
326 (void *) (long) idx
, (unsigned long) &desc
) < 0)
337 x86_cannot_store_register (int regno
)
340 if (is_64bit_tdesc ())
344 return regno
>= I386_NUM_REGS
;
348 x86_cannot_fetch_register (int regno
)
351 if (is_64bit_tdesc ())
355 return regno
>= I386_NUM_REGS
;
359 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
364 if (register_size (regcache
->tdesc
, 0) == 8)
366 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
367 if (x86_64_regmap
[i
] != -1)
368 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
372 /* 32-bit inferior registers need to be zero-extended.
373 Callers would read uninitialized memory otherwise. */
374 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
377 for (i
= 0; i
< I386_NUM_REGS
; i
++)
378 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
380 collect_register_by_name (regcache
, "orig_eax",
381 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
385 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
390 if (register_size (regcache
->tdesc
, 0) == 8)
392 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
393 if (x86_64_regmap
[i
] != -1)
394 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
399 for (i
= 0; i
< I386_NUM_REGS
; i
++)
400 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
402 supply_register_by_name (regcache
, "orig_eax",
403 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Copy the FP registers from REGCACHE into BUF (fxsave layout on
   64-bit, fsave layout otherwise).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Copy the FP registers from BUF (fxsave layout on 64-bit, fsave
   layout otherwise) back into REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
429 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
431 i387_cache_to_fxsave (regcache
, buf
);
435 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
437 i387_fxsave_to_cache (regcache
, buf
);
/* Copy all registers covered by the XSAVE extended state from
   REGCACHE into BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Copy all registers covered by the XSAVE extended state from BUF
   back into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
454 /* ??? The non-biarch i386 case stores all the i387 regs twice.
455 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
456 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
457 doesn't work. IWBN to avoid the duplication in the case where it
458 does work. Maybe the arch_setup routine could check whether it works
459 and update the supported regsets accordingly. */
461 static struct regset_info x86_regsets
[] =
463 #ifdef HAVE_PTRACE_GETREGS
464 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
466 x86_fill_gregset
, x86_store_gregset
},
467 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
468 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
470 # ifdef HAVE_PTRACE_GETFPXREGS
471 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
473 x86_fill_fpxregset
, x86_store_fpxregset
},
476 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
478 x86_fill_fpregset
, x86_store_fpregset
},
479 #endif /* HAVE_PTRACE_GETREGS */
480 { 0, 0, 0, -1, -1, NULL
, NULL
}
484 x86_get_pc (struct regcache
*regcache
)
486 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
491 collect_register_by_name (regcache
, "rip", &pc
);
492 return (CORE_ADDR
) pc
;
497 collect_register_by_name (regcache
, "eip", &pc
);
498 return (CORE_ADDR
) pc
;
503 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
505 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
509 unsigned long newpc
= pc
;
510 supply_register_by_name (regcache
, "rip", &newpc
);
514 unsigned int newpc
= pc
;
515 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
523 x86_breakpoint_at (CORE_ADDR pc
)
527 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
546 /* Support for debug registers. */
549 x86_linux_dr_get (ptid_t ptid
, int regnum
)
554 tid
= ptid_get_lwp (ptid
);
557 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
559 error ("Couldn't read debug register");
565 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
569 tid
= ptid_get_lwp (ptid
);
572 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
574 error ("Couldn't write debug register");
578 update_debug_registers_callback (struct lwp_info
*lwp
, void *arg
)
580 /* The actual update is done later just before resuming the lwp,
581 we just mark that the registers need updating. */
582 lwp
->arch_private
->debug_registers_changed
= 1;
584 /* If the lwp isn't stopped, force it to momentarily pause, so
585 we can update its debug registers. */
587 linux_stop_lwp (lwp
);
592 /* Update the inferior's debug register REGNUM from STATE. */
595 x86_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
597 /* Only update the threads of this process. */
598 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
600 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
602 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
605 /* Return the inferior's debug register REGNUM. */
608 x86_dr_low_get_addr (int regnum
)
610 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
612 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
615 /* Update the inferior's DR7 debug control register from STATE. */
618 x86_dr_low_set_control (unsigned long control
)
620 /* Only update the threads of this process. */
621 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
623 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
626 /* Return the inferior's DR7 debug control register. */
629 x86_dr_low_get_control (void)
631 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
634 /* Get the value of the DR6 debug status register from the inferior
635 and record it in STATE. */
638 x86_dr_low_get_status (void)
640 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
643 /* Low-level function vector. */
644 struct x86_dr_low_type x86_dr_low
=
646 x86_dr_low_set_control
,
649 x86_dr_low_get_status
,
650 x86_dr_low_get_control
,
654 /* Breakpoint/Watchpoint support. */
657 x86_supports_z_point_type (char z_type
)
663 case Z_PACKET_WRITE_WP
:
664 case Z_PACKET_ACCESS_WP
:
672 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
673 int size
, struct raw_breakpoint
*bp
)
675 struct process_info
*proc
= current_process ();
679 case raw_bkpt_type_sw
:
680 return insert_memory_breakpoint (bp
);
682 case raw_bkpt_type_hw
:
683 case raw_bkpt_type_write_wp
:
684 case raw_bkpt_type_access_wp
:
686 enum target_hw_bp_type hw_type
687 = raw_bkpt_type_to_target_hw_bp_type (type
);
688 struct x86_debug_reg_state
*state
689 = &proc
->priv
->arch_private
->debug_reg_state
;
691 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
701 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
702 int size
, struct raw_breakpoint
*bp
)
704 struct process_info
*proc
= current_process ();
708 case raw_bkpt_type_sw
:
709 return remove_memory_breakpoint (bp
);
711 case raw_bkpt_type_hw
:
712 case raw_bkpt_type_write_wp
:
713 case raw_bkpt_type_access_wp
:
715 enum target_hw_bp_type hw_type
716 = raw_bkpt_type_to_target_hw_bp_type (type
);
717 struct x86_debug_reg_state
*state
718 = &proc
->priv
->arch_private
->debug_reg_state
;
720 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
729 x86_stopped_by_watchpoint (void)
731 struct process_info
*proc
= current_process ();
732 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
736 x86_stopped_data_address (void)
738 struct process_info
*proc
= current_process ();
740 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
746 /* Called when a new process is created. */
748 static struct arch_process_info
*
749 x86_linux_new_process (void)
751 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
753 x86_low_init_dregs (&info
->debug_reg_state
);
758 /* Called when a new thread is detected. */
760 static struct arch_lwp_info
*
761 x86_linux_new_thread (void)
763 struct arch_lwp_info
*info
= XCNEW (struct arch_lwp_info
);
765 info
->debug_registers_changed
= 1;
770 /* See nat/x86-dregs.h. */
772 struct x86_debug_reg_state
*
773 x86_debug_reg_state (pid_t pid
)
775 struct process_info
*proc
= find_process_pid (pid
);
777 return &proc
->priv
->arch_private
->debug_reg_state
;
780 /* Called when resuming a thread.
781 If the debug regs have changed, update the thread's copies. */
784 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
786 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
787 int clear_status
= 0;
789 if (lwp
->arch_private
->debug_registers_changed
)
791 struct x86_debug_reg_state
*state
792 = x86_debug_reg_state (ptid_get_pid (ptid
));
795 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
797 ALL_DEBUG_ADDRESS_REGISTERS (i
)
798 if (state
->dr_ref_count
[i
] > 0)
800 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
802 /* If we're setting a watchpoint, any change the inferior
803 had done itself to the debug registers needs to be
804 discarded, otherwise, x86_dr_stopped_data_address can
809 if (state
->dr_control_mirror
!= 0)
810 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
812 lwp
->arch_private
->debug_registers_changed
= 0;
815 if (clear_status
|| lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
816 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
819 /* When GDBSERVER is built as a 64-bit application on linux, the
820 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
821 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
822 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
823 conversion in-place ourselves. */
825 /* These types below (compat_*) define a siginfo type that is layout
826 compatible with the siginfo type exported by the 32-bit userspace
831 typedef int compat_int_t
;
832 typedef unsigned int compat_uptr_t
;
834 typedef int compat_time_t
;
835 typedef int compat_timer_t
;
836 typedef int compat_clock_t
;
838 struct compat_timeval
840 compat_time_t tv_sec
;
844 typedef union compat_sigval
846 compat_int_t sival_int
;
847 compat_uptr_t sival_ptr
;
850 typedef struct compat_siginfo
858 int _pad
[((128 / sizeof (int)) - 3)];
867 /* POSIX.1b timers */
872 compat_sigval_t _sigval
;
875 /* POSIX.1b signals */
880 compat_sigval_t _sigval
;
889 compat_clock_t _utime
;
890 compat_clock_t _stime
;
893 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
908 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
909 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
911 typedef struct compat_x32_siginfo
919 int _pad
[((128 / sizeof (int)) - 3)];
928 /* POSIX.1b timers */
933 compat_sigval_t _sigval
;
936 /* POSIX.1b signals */
941 compat_sigval_t _sigval
;
950 compat_x32_clock_t _utime
;
951 compat_x32_clock_t _stime
;
954 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
967 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
969 #define cpt_si_pid _sifields._kill._pid
970 #define cpt_si_uid _sifields._kill._uid
971 #define cpt_si_timerid _sifields._timer._tid
972 #define cpt_si_overrun _sifields._timer._overrun
973 #define cpt_si_status _sifields._sigchld._status
974 #define cpt_si_utime _sifields._sigchld._utime
975 #define cpt_si_stime _sifields._sigchld._stime
976 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
977 #define cpt_si_addr _sifields._sigfault._addr
978 #define cpt_si_band _sifields._sigpoll._band
979 #define cpt_si_fd _sifields._sigpoll._fd
981 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
982 In their place is si_timer1,si_timer2. */
984 #define si_timerid si_timer1
987 #define si_overrun si_timer2
991 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
993 memset (to
, 0, sizeof (*to
));
995 to
->si_signo
= from
->si_signo
;
996 to
->si_errno
= from
->si_errno
;
997 to
->si_code
= from
->si_code
;
999 if (to
->si_code
== SI_TIMER
)
1001 to
->cpt_si_timerid
= from
->si_timerid
;
1002 to
->cpt_si_overrun
= from
->si_overrun
;
1003 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1005 else if (to
->si_code
== SI_USER
)
1007 to
->cpt_si_pid
= from
->si_pid
;
1008 to
->cpt_si_uid
= from
->si_uid
;
1010 else if (to
->si_code
< 0)
1012 to
->cpt_si_pid
= from
->si_pid
;
1013 to
->cpt_si_uid
= from
->si_uid
;
1014 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1018 switch (to
->si_signo
)
1021 to
->cpt_si_pid
= from
->si_pid
;
1022 to
->cpt_si_uid
= from
->si_uid
;
1023 to
->cpt_si_status
= from
->si_status
;
1024 to
->cpt_si_utime
= from
->si_utime
;
1025 to
->cpt_si_stime
= from
->si_stime
;
1031 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1034 to
->cpt_si_band
= from
->si_band
;
1035 to
->cpt_si_fd
= from
->si_fd
;
1038 to
->cpt_si_pid
= from
->si_pid
;
1039 to
->cpt_si_uid
= from
->si_uid
;
1040 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1047 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1049 memset (to
, 0, sizeof (*to
));
1051 to
->si_signo
= from
->si_signo
;
1052 to
->si_errno
= from
->si_errno
;
1053 to
->si_code
= from
->si_code
;
1055 if (to
->si_code
== SI_TIMER
)
1057 to
->si_timerid
= from
->cpt_si_timerid
;
1058 to
->si_overrun
= from
->cpt_si_overrun
;
1059 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1061 else if (to
->si_code
== SI_USER
)
1063 to
->si_pid
= from
->cpt_si_pid
;
1064 to
->si_uid
= from
->cpt_si_uid
;
1066 else if (to
->si_code
< 0)
1068 to
->si_pid
= from
->cpt_si_pid
;
1069 to
->si_uid
= from
->cpt_si_uid
;
1070 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1074 switch (to
->si_signo
)
1077 to
->si_pid
= from
->cpt_si_pid
;
1078 to
->si_uid
= from
->cpt_si_uid
;
1079 to
->si_status
= from
->cpt_si_status
;
1080 to
->si_utime
= from
->cpt_si_utime
;
1081 to
->si_stime
= from
->cpt_si_stime
;
1087 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1090 to
->si_band
= from
->cpt_si_band
;
1091 to
->si_fd
= from
->cpt_si_fd
;
1094 to
->si_pid
= from
->cpt_si_pid
;
1095 to
->si_uid
= from
->cpt_si_uid
;
1096 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1103 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1106 memset (to
, 0, sizeof (*to
));
1108 to
->si_signo
= from
->si_signo
;
1109 to
->si_errno
= from
->si_errno
;
1110 to
->si_code
= from
->si_code
;
1112 if (to
->si_code
== SI_TIMER
)
1114 to
->cpt_si_timerid
= from
->si_timerid
;
1115 to
->cpt_si_overrun
= from
->si_overrun
;
1116 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1118 else if (to
->si_code
== SI_USER
)
1120 to
->cpt_si_pid
= from
->si_pid
;
1121 to
->cpt_si_uid
= from
->si_uid
;
1123 else if (to
->si_code
< 0)
1125 to
->cpt_si_pid
= from
->si_pid
;
1126 to
->cpt_si_uid
= from
->si_uid
;
1127 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1131 switch (to
->si_signo
)
1134 to
->cpt_si_pid
= from
->si_pid
;
1135 to
->cpt_si_uid
= from
->si_uid
;
1136 to
->cpt_si_status
= from
->si_status
;
1137 to
->cpt_si_utime
= from
->si_utime
;
1138 to
->cpt_si_stime
= from
->si_stime
;
1144 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1147 to
->cpt_si_band
= from
->si_band
;
1148 to
->cpt_si_fd
= from
->si_fd
;
1151 to
->cpt_si_pid
= from
->si_pid
;
1152 to
->cpt_si_uid
= from
->si_uid
;
1153 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1160 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1161 compat_x32_siginfo_t
*from
)
1163 memset (to
, 0, sizeof (*to
));
1165 to
->si_signo
= from
->si_signo
;
1166 to
->si_errno
= from
->si_errno
;
1167 to
->si_code
= from
->si_code
;
1169 if (to
->si_code
== SI_TIMER
)
1171 to
->si_timerid
= from
->cpt_si_timerid
;
1172 to
->si_overrun
= from
->cpt_si_overrun
;
1173 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1175 else if (to
->si_code
== SI_USER
)
1177 to
->si_pid
= from
->cpt_si_pid
;
1178 to
->si_uid
= from
->cpt_si_uid
;
1180 else if (to
->si_code
< 0)
1182 to
->si_pid
= from
->cpt_si_pid
;
1183 to
->si_uid
= from
->cpt_si_uid
;
1184 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1188 switch (to
->si_signo
)
1191 to
->si_pid
= from
->cpt_si_pid
;
1192 to
->si_uid
= from
->cpt_si_uid
;
1193 to
->si_status
= from
->cpt_si_status
;
1194 to
->si_utime
= from
->cpt_si_utime
;
1195 to
->si_stime
= from
->cpt_si_stime
;
1201 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1204 to
->si_band
= from
->cpt_si_band
;
1205 to
->si_fd
= from
->cpt_si_fd
;
1208 to
->si_pid
= from
->cpt_si_pid
;
1209 to
->si_uid
= from
->cpt_si_uid
;
1210 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1216 #endif /* __x86_64__ */
1218 /* Convert a native/host siginfo object, into/from the siginfo in the
1219 layout of the inferiors' architecture. Returns true if any
1220 conversion was done; false otherwise. If DIRECTION is 1, then copy
1221 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1225 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1228 unsigned int machine
;
1229 int tid
= lwpid_of (current_thread
);
1230 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1232 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1233 if (!is_64bit_tdesc ())
1235 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1238 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1240 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1244 /* No fixup for native x32 GDB. */
1245 else if (!is_elf64
&& sizeof (void *) == 8)
1247 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1250 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1253 siginfo_from_compat_x32_siginfo (native
,
1254 (struct compat_x32_siginfo
*) inf
);
1265 /* Format of XSAVE extended state is:
1268 fxsave_bytes[0..463]
1269 sw_usable_bytes[464..511]
1270 xstate_hdr_bytes[512..575]
1275 Same memory layout will be used for the coredump NT_X86_XSTATE
1276 representing the XSAVE extended state registers.
1278 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1279 extended state mask, which is the same as the extended control register
1280 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1281 together with the mask saved in the xstate_hdr_bytes to determine what
1282 states the processor/OS supports and what state, used or initialized,
1283 the process/thread is in. */
1284 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet".  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?
   -1 means "not probed yet".  */
static int have_ptrace_getregset = -1;
1300 /* Get Linux/x86 target description from running target. */
1302 static const struct target_desc
*
1303 x86_linux_read_description (void)
1305 unsigned int machine
;
1309 static uint64_t xcr0
;
1310 struct regset_info
*regset
;
1312 tid
= lwpid_of (current_thread
);
1314 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1316 if (sizeof (void *) == 4)
1319 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1321 else if (machine
== EM_X86_64
)
1322 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1326 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1327 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1329 elf_fpxregset_t fpxregs
;
1331 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1333 have_ptrace_getfpxregs
= 0;
1334 have_ptrace_getregset
= 0;
1335 return tdesc_i386_mmx_linux
;
1338 have_ptrace_getfpxregs
= 1;
1344 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1346 /* Don't use XML. */
1348 if (machine
== EM_X86_64
)
1349 return tdesc_amd64_linux_no_xml
;
1352 return tdesc_i386_linux_no_xml
;
1355 if (have_ptrace_getregset
== -1)
1357 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1360 iov
.iov_base
= xstateregs
;
1361 iov
.iov_len
= sizeof (xstateregs
);
1363 /* Check if PTRACE_GETREGSET works. */
1364 if (ptrace (PTRACE_GETREGSET
, tid
,
1365 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1366 have_ptrace_getregset
= 0;
1369 have_ptrace_getregset
= 1;
1371 /* Get XCR0 from XSAVE extended state. */
1372 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1373 / sizeof (uint64_t))];
1375 /* Use PTRACE_GETREGSET if it is available. */
1376 for (regset
= x86_regsets
;
1377 regset
->fill_function
!= NULL
; regset
++)
1378 if (regset
->get_request
== PTRACE_GETREGSET
)
1379 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1380 else if (regset
->type
!= GENERAL_REGS
)
1385 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1386 xcr0_features
= (have_ptrace_getregset
1387 && (xcr0
& X86_XSTATE_ALL_MASK
));
1392 if (machine
== EM_X86_64
)
1399 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1401 case X86_XSTATE_AVX512_MASK
:
1402 return tdesc_amd64_avx512_linux
;
1404 case X86_XSTATE_MPX_MASK
:
1405 return tdesc_amd64_mpx_linux
;
1407 case X86_XSTATE_AVX_MASK
:
1408 return tdesc_amd64_avx_linux
;
1411 return tdesc_amd64_linux
;
1415 return tdesc_amd64_linux
;
1421 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1423 case X86_XSTATE_AVX512_MASK
:
1424 return tdesc_x32_avx512_linux
;
1426 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1427 case X86_XSTATE_AVX_MASK
:
1428 return tdesc_x32_avx_linux
;
1431 return tdesc_x32_linux
;
1435 return tdesc_x32_linux
;
1443 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1445 case (X86_XSTATE_AVX512_MASK
):
1446 return tdesc_i386_avx512_linux
;
1448 case (X86_XSTATE_MPX_MASK
):
1449 return tdesc_i386_mpx_linux
;
1451 case (X86_XSTATE_AVX_MASK
):
1452 return tdesc_i386_avx_linux
;
1455 return tdesc_i386_linux
;
1459 return tdesc_i386_linux
;
1462 gdb_assert_not_reached ("failed to return tdesc");
1465 /* Callback for find_inferior. Stops iteration when a thread with a
1466 given PID is found. */
1469 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1471 int pid
= *(int *) data
;
1473 return (ptid_get_pid (entry
->id
) == pid
);
1476 /* Callback for for_each_inferior. Calls the arch_setup routine for
1480 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1482 int pid
= ptid_get_pid (entry
->id
);
1484 /* Look up any thread of this processes. */
1486 = (struct thread_info
*) find_inferior (&all_threads
,
1487 same_process_callback
, &pid
);
1489 the_low_target
.arch_setup ();
1492 /* Update all the target description of all processes; a new GDB
1493 connected, and it may or not support xml target descriptions. */
1496 x86_linux_update_xmltarget (void)
1498 struct thread_info
*saved_thread
= current_thread
;
1500 /* Before changing the register cache's internal layout, flush the
1501 contents of the current valid caches back to the threads, and
1502 release the current regcache objects. */
1503 regcache_release ();
1505 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1507 current_thread
= saved_thread
;
1510 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1511 PTRACE_GETREGSET. */
1514 x86_linux_process_qsupported (const char *query
)
1516 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1517 with "i386" in qSupported query, it supports x86 XML target
1520 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1522 char *copy
= xstrdup (query
+ 13);
1525 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1527 if (strcmp (p
, "i386") == 0)
1537 x86_linux_update_xmltarget ();
1540 /* Common for x86/x86-64. */
1542 static struct regsets_info x86_regsets_info
=
1544 x86_regsets
, /* regsets */
1545 0, /* num_regsets */
1546 NULL
, /* disabled_regsets */
1550 static struct regs_info amd64_linux_regs_info
=
1552 NULL
, /* regset_bitmap */
1553 NULL
, /* usrregs_info */
1557 static struct usrregs_info i386_linux_usrregs_info
=
1563 static struct regs_info i386_linux_regs_info
=
1565 NULL
, /* regset_bitmap */
1566 &i386_linux_usrregs_info
,
1570 const struct regs_info
*
1571 x86_linux_regs_info (void)
1574 if (is_64bit_tdesc ())
1575 return &amd64_linux_regs_info
;
1578 return &i386_linux_regs_info
;
1581 /* Initialize the target description for the architecture of the
1585 x86_arch_setup (void)
1587 current_process ()->tdesc
= x86_linux_read_description ();
/* Fast tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1597 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1599 write_inferior_memory (*to
, buf
, len
);
/* Translate OP, a string of whitespace-separated hex byte values
   (e.g. "48 83 ec 18"), into raw bytes stored at BUF.  Returns the
   number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* strtoul consumed nothing: no more hex digits in OP.  */
      if (end == op)
	break;

      *out++ = byte;
      op = end;
    }

  return out - buf;
}
1625 /* Build a jump pad that saves registers and calls a collection
1626 function. Writes a jump instruction to the jump pad to
1627 JJUMPAD_INSN. The caller is responsible to write it in at the
1628 tracepoint address. */
1631 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1632 CORE_ADDR collector
,
1635 CORE_ADDR
*jump_entry
,
1636 CORE_ADDR
*trampoline
,
1637 ULONGEST
*trampoline_size
,
1638 unsigned char *jjump_pad_insn
,
1639 ULONGEST
*jjump_pad_insn_size
,
1640 CORE_ADDR
*adjusted_insn_addr
,
1641 CORE_ADDR
*adjusted_insn_addr_end
,
1644 unsigned char buf
[40];
1648 CORE_ADDR buildaddr
= *jump_entry
;
1650 /* Build the jump pad. */
1652 /* First, do tracepoint data collection. Save registers. */
1654 /* Need to ensure stack pointer saved first. */
1655 buf
[i
++] = 0x54; /* push %rsp */
1656 buf
[i
++] = 0x55; /* push %rbp */
1657 buf
[i
++] = 0x57; /* push %rdi */
1658 buf
[i
++] = 0x56; /* push %rsi */
1659 buf
[i
++] = 0x52; /* push %rdx */
1660 buf
[i
++] = 0x51; /* push %rcx */
1661 buf
[i
++] = 0x53; /* push %rbx */
1662 buf
[i
++] = 0x50; /* push %rax */
1663 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1664 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1665 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1666 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1667 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1668 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1669 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1670 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1671 buf
[i
++] = 0x9c; /* pushfq */
1672 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1674 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1675 i
+= sizeof (unsigned long);
1676 buf
[i
++] = 0x57; /* push %rdi */
1677 append_insns (&buildaddr
, i
, buf
);
1679 /* Stack space for the collecting_t object. */
1681 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1682 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1683 memcpy (buf
+ i
, &tpoint
, 8);
1685 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1686 i
+= push_opcode (&buf
[i
],
1687 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1688 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1689 append_insns (&buildaddr
, i
, buf
);
1693 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1694 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1696 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1697 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1698 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1699 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1700 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1701 append_insns (&buildaddr
, i
, buf
);
1703 /* Set up the gdb_collect call. */
1704 /* At this point, (stack pointer + 0x18) is the base of our saved
1708 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1709 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1711 /* tpoint address may be 64-bit wide. */
1712 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1713 memcpy (buf
+ i
, &tpoint
, 8);
1715 append_insns (&buildaddr
, i
, buf
);
1717 /* The collector function being in the shared library, may be
1718 >31-bits away off the jump pad. */
1720 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1721 memcpy (buf
+ i
, &collector
, 8);
1723 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1724 append_insns (&buildaddr
, i
, buf
);
1726 /* Clear the spin-lock. */
1728 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1729 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1730 memcpy (buf
+ i
, &lockaddr
, 8);
1732 append_insns (&buildaddr
, i
, buf
);
1734 /* Remove stack that had been used for the collect_t object. */
1736 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1737 append_insns (&buildaddr
, i
, buf
);
1739 /* Restore register state. */
1741 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1745 buf
[i
++] = 0x9d; /* popfq */
1746 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1747 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1748 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1749 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1750 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1751 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1752 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1753 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1754 buf
[i
++] = 0x58; /* pop %rax */
1755 buf
[i
++] = 0x5b; /* pop %rbx */
1756 buf
[i
++] = 0x59; /* pop %rcx */
1757 buf
[i
++] = 0x5a; /* pop %rdx */
1758 buf
[i
++] = 0x5e; /* pop %rsi */
1759 buf
[i
++] = 0x5f; /* pop %rdi */
1760 buf
[i
++] = 0x5d; /* pop %rbp */
1761 buf
[i
++] = 0x5c; /* pop %rsp */
1762 append_insns (&buildaddr
, i
, buf
);
1764 /* Now, adjust the original instruction to execute in the jump
1766 *adjusted_insn_addr
= buildaddr
;
1767 relocate_instruction (&buildaddr
, tpaddr
);
1768 *adjusted_insn_addr_end
= buildaddr
;
1770 /* Finally, write a jump back to the program. */
1772 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1773 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1776 "E.Jump back from jump pad too far from tracepoint "
1777 "(offset 0x%" PRIx64
" > int32).", loffset
);
1781 offset
= (int) loffset
;
1782 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1783 memcpy (buf
+ 1, &offset
, 4);
1784 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1786 /* The jump pad is now built. Wire in a jump to our jump pad. This
1787 is always done last (by our caller actually), so that we can
1788 install fast tracepoints with threads running. This relies on
1789 the agent's atomic write support. */
1790 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1791 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1794 "E.Jump pad too far from tracepoint "
1795 "(offset 0x%" PRIx64
" > int32).", loffset
);
1799 offset
= (int) loffset
;
1801 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1802 memcpy (buf
+ 1, &offset
, 4);
1803 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1804 *jjump_pad_insn_size
= sizeof (jump_insn
);
1806 /* Return the end address of our pad. */
1807 *jump_entry
= buildaddr
;
1812 #endif /* __x86_64__ */
1814 /* Build a jump pad that saves registers and calls a collection
1815 function. Writes a jump instruction to the jump pad to
1816 JJUMPAD_INSN. The caller is responsible to write it in at the
1817 tracepoint address. */
1820 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1821 CORE_ADDR collector
,
1824 CORE_ADDR
*jump_entry
,
1825 CORE_ADDR
*trampoline
,
1826 ULONGEST
*trampoline_size
,
1827 unsigned char *jjump_pad_insn
,
1828 ULONGEST
*jjump_pad_insn_size
,
1829 CORE_ADDR
*adjusted_insn_addr
,
1830 CORE_ADDR
*adjusted_insn_addr_end
,
1833 unsigned char buf
[0x100];
1835 CORE_ADDR buildaddr
= *jump_entry
;
1837 /* Build the jump pad. */
1839 /* First, do tracepoint data collection. Save registers. */
1841 buf
[i
++] = 0x60; /* pushad */
1842 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1843 *((int *)(buf
+ i
)) = (int) tpaddr
;
1845 buf
[i
++] = 0x9c; /* pushf */
1846 buf
[i
++] = 0x1e; /* push %ds */
1847 buf
[i
++] = 0x06; /* push %es */
1848 buf
[i
++] = 0x0f; /* push %fs */
1850 buf
[i
++] = 0x0f; /* push %gs */
1852 buf
[i
++] = 0x16; /* push %ss */
1853 buf
[i
++] = 0x0e; /* push %cs */
1854 append_insns (&buildaddr
, i
, buf
);
1856 /* Stack space for the collecting_t object. */
1858 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1860 /* Build the object. */
1861 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1862 memcpy (buf
+ i
, &tpoint
, 4);
1864 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1866 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1867 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1868 append_insns (&buildaddr
, i
, buf
);
1870 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1871 If we cared for it, this could be using xchg alternatively. */
1874 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1875 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1877 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1879 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1880 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1881 append_insns (&buildaddr
, i
, buf
);
1884 /* Set up arguments to the gdb_collect call. */
1886 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1887 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1888 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1889 append_insns (&buildaddr
, i
, buf
);
1892 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1893 append_insns (&buildaddr
, i
, buf
);
1896 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1897 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1899 append_insns (&buildaddr
, i
, buf
);
1901 buf
[0] = 0xe8; /* call <reladdr> */
1902 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1903 memcpy (buf
+ 1, &offset
, 4);
1904 append_insns (&buildaddr
, 5, buf
);
1905 /* Clean up after the call. */
1906 buf
[0] = 0x83; /* add $0x8,%esp */
1909 append_insns (&buildaddr
, 3, buf
);
1912 /* Clear the spin-lock. This would need the LOCK prefix on older
1915 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1916 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1917 memcpy (buf
+ i
, &lockaddr
, 4);
1919 append_insns (&buildaddr
, i
, buf
);
1922 /* Remove stack that had been used for the collect_t object. */
1924 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1925 append_insns (&buildaddr
, i
, buf
);
1928 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1931 buf
[i
++] = 0x17; /* pop %ss */
1932 buf
[i
++] = 0x0f; /* pop %gs */
1934 buf
[i
++] = 0x0f; /* pop %fs */
1936 buf
[i
++] = 0x07; /* pop %es */
1937 buf
[i
++] = 0x1f; /* pop %ds */
1938 buf
[i
++] = 0x9d; /* popf */
1939 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1942 buf
[i
++] = 0x61; /* popad */
1943 append_insns (&buildaddr
, i
, buf
);
1945 /* Now, adjust the original instruction to execute in the jump
1947 *adjusted_insn_addr
= buildaddr
;
1948 relocate_instruction (&buildaddr
, tpaddr
);
1949 *adjusted_insn_addr_end
= buildaddr
;
1951 /* Write the jump back to the program. */
1952 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1953 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1954 memcpy (buf
+ 1, &offset
, 4);
1955 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1957 /* The jump pad is now built. Wire in a jump to our jump pad. This
1958 is always done last (by our caller actually), so that we can
1959 install fast tracepoints with threads running. This relies on
1960 the agent's atomic write support. */
1963 /* Create a trampoline. */
1964 *trampoline_size
= sizeof (jump_insn
);
1965 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1967 /* No trampoline space available. */
1969 "E.Cannot allocate trampoline space needed for fast "
1970 "tracepoints on 4-byte instructions.");
1974 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1975 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1976 memcpy (buf
+ 1, &offset
, 4);
1977 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1979 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1980 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1981 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1982 memcpy (buf
+ 2, &offset
, 2);
1983 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1984 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1988 /* Else use a 32-bit relative jump instruction. */
1989 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1990 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1991 memcpy (buf
+ 1, &offset
, 4);
1992 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1993 *jjump_pad_insn_size
= sizeof (jump_insn
);
1996 /* Return the end address of our pad. */
1997 *jump_entry
= buildaddr
;
2003 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2004 CORE_ADDR collector
,
2007 CORE_ADDR
*jump_entry
,
2008 CORE_ADDR
*trampoline
,
2009 ULONGEST
*trampoline_size
,
2010 unsigned char *jjump_pad_insn
,
2011 ULONGEST
*jjump_pad_insn_size
,
2012 CORE_ADDR
*adjusted_insn_addr
,
2013 CORE_ADDR
*adjusted_insn_addr_end
,
2017 if (is_64bit_tdesc ())
2018 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2019 collector
, lockaddr
,
2020 orig_size
, jump_entry
,
2021 trampoline
, trampoline_size
,
2023 jjump_pad_insn_size
,
2025 adjusted_insn_addr_end
,
2029 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2030 collector
, lockaddr
,
2031 orig_size
, jump_entry
,
2032 trampoline
, trampoline_size
,
2034 jjump_pad_insn_size
,
2036 adjusted_insn_addr_end
,
2040 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2044 x86_get_min_fast_tracepoint_insn_len (void)
2046 static int warned_about_fast_tracepoints
= 0;
2049 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2050 used for fast tracepoints. */
2051 if (is_64bit_tdesc ())
2055 if (agent_loaded_p ())
2057 char errbuf
[IPA_BUFSIZ
];
2061 /* On x86, if trampolines are available, then 4-byte jump instructions
2062 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2063 with a 4-byte offset are used instead. */
2064 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2068 /* GDB has no channel to explain to user why a shorter fast
2069 tracepoint is not possible, but at least make GDBserver
2070 mention that something has gone awry. */
2071 if (!warned_about_fast_tracepoints
)
2073 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2074 warned_about_fast_tracepoints
= 1;
2081 /* Indicate that the minimum length is currently unknown since the IPA
2082 has not loaded yet. */
2088 add_insns (unsigned char *start
, int len
)
2090 CORE_ADDR buildaddr
= current_insn_ptr
;
2093 debug_printf ("Adding %d bytes of insn at %s\n",
2094 len
, paddress (buildaddr
));
2096 append_insns (&buildaddr
, len
, start
);
2097 current_insn_ptr
= buildaddr
;
2100 /* Our general strategy for emitting code is to avoid specifying raw
2101 bytes whenever possible, and instead copy a block of inline asm
2102 that is embedded in the function. This is a little messy, because
2103 we need to keep the compiler from discarding what looks like dead
2104 code, plus suppress various warnings. */
2106 #define EMIT_ASM(NAME, INSNS) \
2109 extern unsigned char start_ ## NAME, end_ ## NAME; \
2110 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2111 __asm__ ("jmp end_" #NAME "\n" \
2112 "\t" "start_" #NAME ":" \
2114 "\t" "end_" #NAME ":"); \
2119 #define EMIT_ASM32(NAME,INSNS) \
2122 extern unsigned char start_ ## NAME, end_ ## NAME; \
2123 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2124 __asm__ (".code32\n" \
2125 "\t" "jmp end_" #NAME "\n" \
2126 "\t" "start_" #NAME ":\n" \
2128 "\t" "end_" #NAME ":\n" \
2134 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2141 amd64_emit_prologue (void)
2143 EMIT_ASM (amd64_prologue
,
2145 "movq %rsp,%rbp\n\t"
2146 "sub $0x20,%rsp\n\t"
2147 "movq %rdi,-8(%rbp)\n\t"
2148 "movq %rsi,-16(%rbp)");
2153 amd64_emit_epilogue (void)
2155 EMIT_ASM (amd64_epilogue
,
2156 "movq -16(%rbp),%rdi\n\t"
2157 "movq %rax,(%rdi)\n\t"
2164 amd64_emit_add (void)
2166 EMIT_ASM (amd64_add
,
2167 "add (%rsp),%rax\n\t"
2168 "lea 0x8(%rsp),%rsp");
2172 amd64_emit_sub (void)
2174 EMIT_ASM (amd64_sub
,
2175 "sub %rax,(%rsp)\n\t"
2180 amd64_emit_mul (void)
2186 amd64_emit_lsh (void)
2192 amd64_emit_rsh_signed (void)
2198 amd64_emit_rsh_unsigned (void)
2204 amd64_emit_ext (int arg
)
2209 EMIT_ASM (amd64_ext_8
,
2215 EMIT_ASM (amd64_ext_16
,
2220 EMIT_ASM (amd64_ext_32
,
2229 amd64_emit_log_not (void)
2231 EMIT_ASM (amd64_log_not
,
2232 "test %rax,%rax\n\t"
2238 amd64_emit_bit_and (void)
2240 EMIT_ASM (amd64_and
,
2241 "and (%rsp),%rax\n\t"
2242 "lea 0x8(%rsp),%rsp");
2246 amd64_emit_bit_or (void)
2249 "or (%rsp),%rax\n\t"
2250 "lea 0x8(%rsp),%rsp");
2254 amd64_emit_bit_xor (void)
2256 EMIT_ASM (amd64_xor
,
2257 "xor (%rsp),%rax\n\t"
2258 "lea 0x8(%rsp),%rsp");
2262 amd64_emit_bit_not (void)
2264 EMIT_ASM (amd64_bit_not
,
2265 "xorq $0xffffffffffffffff,%rax");
2269 amd64_emit_equal (void)
2271 EMIT_ASM (amd64_equal
,
2272 "cmp %rax,(%rsp)\n\t"
2273 "je .Lamd64_equal_true\n\t"
2275 "jmp .Lamd64_equal_end\n\t"
2276 ".Lamd64_equal_true:\n\t"
2278 ".Lamd64_equal_end:\n\t"
2279 "lea 0x8(%rsp),%rsp");
2283 amd64_emit_less_signed (void)
2285 EMIT_ASM (amd64_less_signed
,
2286 "cmp %rax,(%rsp)\n\t"
2287 "jl .Lamd64_less_signed_true\n\t"
2289 "jmp .Lamd64_less_signed_end\n\t"
2290 ".Lamd64_less_signed_true:\n\t"
2292 ".Lamd64_less_signed_end:\n\t"
2293 "lea 0x8(%rsp),%rsp");
2297 amd64_emit_less_unsigned (void)
2299 EMIT_ASM (amd64_less_unsigned
,
2300 "cmp %rax,(%rsp)\n\t"
2301 "jb .Lamd64_less_unsigned_true\n\t"
2303 "jmp .Lamd64_less_unsigned_end\n\t"
2304 ".Lamd64_less_unsigned_true:\n\t"
2306 ".Lamd64_less_unsigned_end:\n\t"
2307 "lea 0x8(%rsp),%rsp");
2311 amd64_emit_ref (int size
)
2316 EMIT_ASM (amd64_ref1
,
2320 EMIT_ASM (amd64_ref2
,
2324 EMIT_ASM (amd64_ref4
,
2325 "movl (%rax),%eax");
2328 EMIT_ASM (amd64_ref8
,
2329 "movq (%rax),%rax");
2335 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2337 EMIT_ASM (amd64_if_goto
,
2341 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2349 amd64_emit_goto (int *offset_p
, int *size_p
)
2351 EMIT_ASM (amd64_goto
,
2352 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2360 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2362 int diff
= (to
- (from
+ size
));
2363 unsigned char buf
[sizeof (int)];
2371 memcpy (buf
, &diff
, sizeof (int));
2372 write_inferior_memory (from
, buf
, sizeof (int));
2376 amd64_emit_const (LONGEST num
)
2378 unsigned char buf
[16];
2380 CORE_ADDR buildaddr
= current_insn_ptr
;
2383 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2384 memcpy (&buf
[i
], &num
, sizeof (num
));
2386 append_insns (&buildaddr
, i
, buf
);
2387 current_insn_ptr
= buildaddr
;
2391 amd64_emit_call (CORE_ADDR fn
)
2393 unsigned char buf
[16];
2395 CORE_ADDR buildaddr
;
2398 /* The destination function being in the shared library, may be
2399 >31-bits away off the compiled code pad. */
2401 buildaddr
= current_insn_ptr
;
2403 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2407 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2409 /* Offset is too large for a call. Use callq, but that requires
2410 a register, so avoid it if possible. Use r10, since it is
2411 call-clobbered, we don't have to push/pop it. */
2412 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2414 memcpy (buf
+ i
, &fn
, 8);
2416 buf
[i
++] = 0xff; /* callq *%r10 */
2421 int offset32
= offset64
; /* we know we can't overflow here. */
2422 memcpy (buf
+ i
, &offset32
, 4);
2426 append_insns (&buildaddr
, i
, buf
);
2427 current_insn_ptr
= buildaddr
;
2431 amd64_emit_reg (int reg
)
2433 unsigned char buf
[16];
2435 CORE_ADDR buildaddr
;
2437 /* Assume raw_regs is still in %rdi. */
2438 buildaddr
= current_insn_ptr
;
2440 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2441 memcpy (&buf
[i
], ®
, sizeof (reg
));
2443 append_insns (&buildaddr
, i
, buf
);
2444 current_insn_ptr
= buildaddr
;
2445 amd64_emit_call (get_raw_reg_func_addr ());
2449 amd64_emit_pop (void)
2451 EMIT_ASM (amd64_pop
,
2456 amd64_emit_stack_flush (void)
2458 EMIT_ASM (amd64_stack_flush
,
2463 amd64_emit_zero_ext (int arg
)
2468 EMIT_ASM (amd64_zero_ext_8
,
2472 EMIT_ASM (amd64_zero_ext_16
,
2473 "and $0xffff,%rax");
2476 EMIT_ASM (amd64_zero_ext_32
,
2477 "mov $0xffffffff,%rcx\n\t"
2486 amd64_emit_swap (void)
2488 EMIT_ASM (amd64_swap
,
2495 amd64_emit_stack_adjust (int n
)
2497 unsigned char buf
[16];
2499 CORE_ADDR buildaddr
= current_insn_ptr
;
2502 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2506 /* This only handles adjustments up to 16, but we don't expect any more. */
2508 append_insns (&buildaddr
, i
, buf
);
2509 current_insn_ptr
= buildaddr
;
2512 /* FN's prototype is `LONGEST(*fn)(int)'. */
2515 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2517 unsigned char buf
[16];
2519 CORE_ADDR buildaddr
;
2521 buildaddr
= current_insn_ptr
;
2523 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2524 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2526 append_insns (&buildaddr
, i
, buf
);
2527 current_insn_ptr
= buildaddr
;
2528 amd64_emit_call (fn
);
2531 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2534 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2536 unsigned char buf
[16];
2538 CORE_ADDR buildaddr
;
2540 buildaddr
= current_insn_ptr
;
2542 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2543 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2545 append_insns (&buildaddr
, i
, buf
);
2546 current_insn_ptr
= buildaddr
;
2547 EMIT_ASM (amd64_void_call_2_a
,
2548 /* Save away a copy of the stack top. */
2550 /* Also pass top as the second argument. */
2552 amd64_emit_call (fn
);
2553 EMIT_ASM (amd64_void_call_2_b
,
2554 /* Restore the stack top, %rax may have been trashed. */
2559 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2562 "cmp %rax,(%rsp)\n\t"
2563 "jne .Lamd64_eq_fallthru\n\t"
2564 "lea 0x8(%rsp),%rsp\n\t"
2566 /* jmp, but don't trust the assembler to choose the right jump */
2567 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2568 ".Lamd64_eq_fallthru:\n\t"
2569 "lea 0x8(%rsp),%rsp\n\t"
2579 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2582 "cmp %rax,(%rsp)\n\t"
2583 "je .Lamd64_ne_fallthru\n\t"
2584 "lea 0x8(%rsp),%rsp\n\t"
2586 /* jmp, but don't trust the assembler to choose the right jump */
2587 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2588 ".Lamd64_ne_fallthru:\n\t"
2589 "lea 0x8(%rsp),%rsp\n\t"
2599 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2602 "cmp %rax,(%rsp)\n\t"
2603 "jnl .Lamd64_lt_fallthru\n\t"
2604 "lea 0x8(%rsp),%rsp\n\t"
2606 /* jmp, but don't trust the assembler to choose the right jump */
2607 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2608 ".Lamd64_lt_fallthru:\n\t"
2609 "lea 0x8(%rsp),%rsp\n\t"
2619 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2622 "cmp %rax,(%rsp)\n\t"
2623 "jnle .Lamd64_le_fallthru\n\t"
2624 "lea 0x8(%rsp),%rsp\n\t"
2626 /* jmp, but don't trust the assembler to choose the right jump */
2627 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2628 ".Lamd64_le_fallthru:\n\t"
2629 "lea 0x8(%rsp),%rsp\n\t"
2639 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2642 "cmp %rax,(%rsp)\n\t"
2643 "jng .Lamd64_gt_fallthru\n\t"
2644 "lea 0x8(%rsp),%rsp\n\t"
2646 /* jmp, but don't trust the assembler to choose the right jump */
2647 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2648 ".Lamd64_gt_fallthru:\n\t"
2649 "lea 0x8(%rsp),%rsp\n\t"
2659 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2662 "cmp %rax,(%rsp)\n\t"
2663 "jnge .Lamd64_ge_fallthru\n\t"
2664 ".Lamd64_ge_jump:\n\t"
2665 "lea 0x8(%rsp),%rsp\n\t"
2667 /* jmp, but don't trust the assembler to choose the right jump */
2668 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2669 ".Lamd64_ge_fallthru:\n\t"
2670 "lea 0x8(%rsp),%rsp\n\t"
2679 struct emit_ops amd64_emit_ops
=
2681 amd64_emit_prologue
,
2682 amd64_emit_epilogue
,
2687 amd64_emit_rsh_signed
,
2688 amd64_emit_rsh_unsigned
,
2696 amd64_emit_less_signed
,
2697 amd64_emit_less_unsigned
,
2701 amd64_write_goto_address
,
2706 amd64_emit_stack_flush
,
2707 amd64_emit_zero_ext
,
2709 amd64_emit_stack_adjust
,
2710 amd64_emit_int_call_1
,
2711 amd64_emit_void_call_2
,
2720 #endif /* __x86_64__ */
2723 i386_emit_prologue (void)
2725 EMIT_ASM32 (i386_prologue
,
2729 /* At this point, the raw regs base address is at 8(%ebp), and the
2730 value pointer is at 12(%ebp). */
2734 i386_emit_epilogue (void)
2736 EMIT_ASM32 (i386_epilogue
,
2737 "mov 12(%ebp),%ecx\n\t"
2738 "mov %eax,(%ecx)\n\t"
2739 "mov %ebx,0x4(%ecx)\n\t"
2747 i386_emit_add (void)
2749 EMIT_ASM32 (i386_add
,
2750 "add (%esp),%eax\n\t"
2751 "adc 0x4(%esp),%ebx\n\t"
2752 "lea 0x8(%esp),%esp");
2756 i386_emit_sub (void)
2758 EMIT_ASM32 (i386_sub
,
2759 "subl %eax,(%esp)\n\t"
2760 "sbbl %ebx,4(%esp)\n\t"
2766 i386_emit_mul (void)
2772 i386_emit_lsh (void)
2778 i386_emit_rsh_signed (void)
2784 i386_emit_rsh_unsigned (void)
2790 i386_emit_ext (int arg
)
2795 EMIT_ASM32 (i386_ext_8
,
2798 "movl %eax,%ebx\n\t"
2802 EMIT_ASM32 (i386_ext_16
,
2804 "movl %eax,%ebx\n\t"
2808 EMIT_ASM32 (i386_ext_32
,
2809 "movl %eax,%ebx\n\t"
2818 i386_emit_log_not (void)
2820 EMIT_ASM32 (i386_log_not
,
2822 "test %eax,%eax\n\t"
2829 i386_emit_bit_and (void)
2831 EMIT_ASM32 (i386_and
,
2832 "and (%esp),%eax\n\t"
2833 "and 0x4(%esp),%ebx\n\t"
2834 "lea 0x8(%esp),%esp");
2838 i386_emit_bit_or (void)
2840 EMIT_ASM32 (i386_or
,
2841 "or (%esp),%eax\n\t"
2842 "or 0x4(%esp),%ebx\n\t"
2843 "lea 0x8(%esp),%esp");
2847 i386_emit_bit_xor (void)
2849 EMIT_ASM32 (i386_xor
,
2850 "xor (%esp),%eax\n\t"
2851 "xor 0x4(%esp),%ebx\n\t"
2852 "lea 0x8(%esp),%esp");
2856 i386_emit_bit_not (void)
2858 EMIT_ASM32 (i386_bit_not
,
2859 "xor $0xffffffff,%eax\n\t"
2860 "xor $0xffffffff,%ebx\n\t");
2864 i386_emit_equal (void)
2866 EMIT_ASM32 (i386_equal
,
2867 "cmpl %ebx,4(%esp)\n\t"
2868 "jne .Li386_equal_false\n\t"
2869 "cmpl %eax,(%esp)\n\t"
2870 "je .Li386_equal_true\n\t"
2871 ".Li386_equal_false:\n\t"
2873 "jmp .Li386_equal_end\n\t"
2874 ".Li386_equal_true:\n\t"
2876 ".Li386_equal_end:\n\t"
2878 "lea 0x8(%esp),%esp");
2882 i386_emit_less_signed (void)
2884 EMIT_ASM32 (i386_less_signed
,
2885 "cmpl %ebx,4(%esp)\n\t"
2886 "jl .Li386_less_signed_true\n\t"
2887 "jne .Li386_less_signed_false\n\t"
2888 "cmpl %eax,(%esp)\n\t"
2889 "jl .Li386_less_signed_true\n\t"
2890 ".Li386_less_signed_false:\n\t"
2892 "jmp .Li386_less_signed_end\n\t"
2893 ".Li386_less_signed_true:\n\t"
2895 ".Li386_less_signed_end:\n\t"
2897 "lea 0x8(%esp),%esp");
2901 i386_emit_less_unsigned (void)
2903 EMIT_ASM32 (i386_less_unsigned
,
2904 "cmpl %ebx,4(%esp)\n\t"
2905 "jb .Li386_less_unsigned_true\n\t"
2906 "jne .Li386_less_unsigned_false\n\t"
2907 "cmpl %eax,(%esp)\n\t"
2908 "jb .Li386_less_unsigned_true\n\t"
2909 ".Li386_less_unsigned_false:\n\t"
2911 "jmp .Li386_less_unsigned_end\n\t"
2912 ".Li386_less_unsigned_true:\n\t"
2914 ".Li386_less_unsigned_end:\n\t"
2916 "lea 0x8(%esp),%esp");
2920 i386_emit_ref (int size
)
2925 EMIT_ASM32 (i386_ref1
,
2929 EMIT_ASM32 (i386_ref2
,
2933 EMIT_ASM32 (i386_ref4
,
2934 "movl (%eax),%eax");
2937 EMIT_ASM32 (i386_ref8
,
2938 "movl 4(%eax),%ebx\n\t"
2939 "movl (%eax),%eax");
2945 i386_emit_if_goto (int *offset_p
, int *size_p
)
2947 EMIT_ASM32 (i386_if_goto
,
2953 /* Don't trust the assembler to choose the right jump */
2954 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2957 *offset_p
= 11; /* be sure that this matches the sequence above */
2963 i386_emit_goto (int *offset_p
, int *size_p
)
2965 EMIT_ASM32 (i386_goto
,
2966 /* Don't trust the assembler to choose the right jump */
2967 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2975 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2977 int diff
= (to
- (from
+ size
));
2978 unsigned char buf
[sizeof (int)];
2980 /* We're only doing 4-byte sizes at the moment. */
2987 memcpy (buf
, &diff
, sizeof (int));
2988 write_inferior_memory (from
, buf
, sizeof (int));
2992 i386_emit_const (LONGEST num
)
2994 unsigned char buf
[16];
2996 CORE_ADDR buildaddr
= current_insn_ptr
;
2999 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3000 lo
= num
& 0xffffffff;
3001 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3003 hi
= ((num
>> 32) & 0xffffffff);
3006 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3007 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3012 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3014 append_insns (&buildaddr
, i
, buf
);
3015 current_insn_ptr
= buildaddr
;
3019 i386_emit_call (CORE_ADDR fn
)
3021 unsigned char buf
[16];
3023 CORE_ADDR buildaddr
;
3025 buildaddr
= current_insn_ptr
;
3027 buf
[i
++] = 0xe8; /* call <reladdr> */
3028 offset
= ((int) fn
) - (buildaddr
+ 5);
3029 memcpy (buf
+ 1, &offset
, 4);
3030 append_insns (&buildaddr
, 5, buf
);
3031 current_insn_ptr
= buildaddr
;
3035 i386_emit_reg (int reg
)
3037 unsigned char buf
[16];
3039 CORE_ADDR buildaddr
;
3041 EMIT_ASM32 (i386_reg_a
,
3043 buildaddr
= current_insn_ptr
;
3045 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3046 memcpy (&buf
[i
], ®
, sizeof (reg
));
3048 append_insns (&buildaddr
, i
, buf
);
3049 current_insn_ptr
= buildaddr
;
3050 EMIT_ASM32 (i386_reg_b
,
3051 "mov %eax,4(%esp)\n\t"
3052 "mov 8(%ebp),%eax\n\t"
3054 i386_emit_call (get_raw_reg_func_addr ());
3055 EMIT_ASM32 (i386_reg_c
,
3057 "lea 0x8(%esp),%esp");
3061 i386_emit_pop (void)
3063 EMIT_ASM32 (i386_pop
,
3069 i386_emit_stack_flush (void)
3071 EMIT_ASM32 (i386_stack_flush
,
3077 i386_emit_zero_ext (int arg
)
3082 EMIT_ASM32 (i386_zero_ext_8
,
3083 "and $0xff,%eax\n\t"
3087 EMIT_ASM32 (i386_zero_ext_16
,
3088 "and $0xffff,%eax\n\t"
3092 EMIT_ASM32 (i386_zero_ext_32
,
3101 i386_emit_swap (void)
3103 EMIT_ASM32 (i386_swap
,
3113 i386_emit_stack_adjust (int n
)
3115 unsigned char buf
[16];
3117 CORE_ADDR buildaddr
= current_insn_ptr
;
3120 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3124 append_insns (&buildaddr
, i
, buf
);
3125 current_insn_ptr
= buildaddr
;
3128 /* FN's prototype is `LONGEST(*fn)(int)'. */
3131 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3133 unsigned char buf
[16];
3135 CORE_ADDR buildaddr
;
3137 EMIT_ASM32 (i386_int_call_1_a
,
3138 /* Reserve a bit of stack space. */
3140 /* Put the one argument on the stack. */
3141 buildaddr
= current_insn_ptr
;
3143 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3146 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3148 append_insns (&buildaddr
, i
, buf
);
3149 current_insn_ptr
= buildaddr
;
3150 i386_emit_call (fn
);
3151 EMIT_ASM32 (i386_int_call_1_c
,
3153 "lea 0x8(%esp),%esp");
3156 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3159 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3161 unsigned char buf
[16];
3163 CORE_ADDR buildaddr
;
3165 EMIT_ASM32 (i386_void_call_2_a
,
3166 /* Preserve %eax only; we don't have to worry about %ebx. */
3168 /* Reserve a bit of stack space for arguments. */
3169 "sub $0x10,%esp\n\t"
3170 /* Copy "top" to the second argument position. (Note that
3171 we can't assume function won't scribble on its
3172 arguments, so don't try to restore from this.) */
3173 "mov %eax,4(%esp)\n\t"
3174 "mov %ebx,8(%esp)");
3175 /* Put the first argument on the stack. */
3176 buildaddr
= current_insn_ptr
;
3178 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3181 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3183 append_insns (&buildaddr
, i
, buf
);
3184 current_insn_ptr
= buildaddr
;
3185 i386_emit_call (fn
);
3186 EMIT_ASM32 (i386_void_call_2_b
,
3187 "lea 0x10(%esp),%esp\n\t"
3188 /* Restore original stack top. */
3194 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3197 /* Check low half first, more likely to be decider */
3198 "cmpl %eax,(%esp)\n\t"
3199 "jne .Leq_fallthru\n\t"
3200 "cmpl %ebx,4(%esp)\n\t"
3201 "jne .Leq_fallthru\n\t"
3202 "lea 0x8(%esp),%esp\n\t"
3205 /* jmp, but don't trust the assembler to choose the right jump */
3206 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3207 ".Leq_fallthru:\n\t"
3208 "lea 0x8(%esp),%esp\n\t"
3219 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3222 /* Check low half first, more likely to be decider */
3223 "cmpl %eax,(%esp)\n\t"
3225 "cmpl %ebx,4(%esp)\n\t"
3226 "je .Lne_fallthru\n\t"
3228 "lea 0x8(%esp),%esp\n\t"
3231 /* jmp, but don't trust the assembler to choose the right jump */
3232 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3233 ".Lne_fallthru:\n\t"
3234 "lea 0x8(%esp),%esp\n\t"
3245 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3248 "cmpl %ebx,4(%esp)\n\t"
3250 "jne .Llt_fallthru\n\t"
3251 "cmpl %eax,(%esp)\n\t"
3252 "jnl .Llt_fallthru\n\t"
3254 "lea 0x8(%esp),%esp\n\t"
3257 /* jmp, but don't trust the assembler to choose the right jump */
3258 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3259 ".Llt_fallthru:\n\t"
3260 "lea 0x8(%esp),%esp\n\t"
3271 i386_emit_le_goto (int *offset_p
, int *size_p
)
3274 "cmpl %ebx,4(%esp)\n\t"
3276 "jne .Lle_fallthru\n\t"
3277 "cmpl %eax,(%esp)\n\t"
3278 "jnle .Lle_fallthru\n\t"
3280 "lea 0x8(%esp),%esp\n\t"
3283 /* jmp, but don't trust the assembler to choose the right jump */
3284 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3285 ".Lle_fallthru:\n\t"
3286 "lea 0x8(%esp),%esp\n\t"
3297 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3300 "cmpl %ebx,4(%esp)\n\t"
3302 "jne .Lgt_fallthru\n\t"
3303 "cmpl %eax,(%esp)\n\t"
3304 "jng .Lgt_fallthru\n\t"
3306 "lea 0x8(%esp),%esp\n\t"
3309 /* jmp, but don't trust the assembler to choose the right jump */
3310 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3311 ".Lgt_fallthru:\n\t"
3312 "lea 0x8(%esp),%esp\n\t"
3323 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3326 "cmpl %ebx,4(%esp)\n\t"
3328 "jne .Lge_fallthru\n\t"
3329 "cmpl %eax,(%esp)\n\t"
3330 "jnge .Lge_fallthru\n\t"
3332 "lea 0x8(%esp),%esp\n\t"
3335 /* jmp, but don't trust the assembler to choose the right jump */
3336 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3337 ".Lge_fallthru:\n\t"
3338 "lea 0x8(%esp),%esp\n\t"
3348 struct emit_ops i386_emit_ops
=
3356 i386_emit_rsh_signed
,
3357 i386_emit_rsh_unsigned
,
3365 i386_emit_less_signed
,
3366 i386_emit_less_unsigned
,
3370 i386_write_goto_address
,
3375 i386_emit_stack_flush
,
3378 i386_emit_stack_adjust
,
3379 i386_emit_int_call_1
,
3380 i386_emit_void_call_2
,
3390 static struct emit_ops
*
3394 if (is_64bit_tdesc ())
3395 return &amd64_emit_ops
;
3398 return &i386_emit_ops
;
3402 x86_supports_range_stepping (void)
3407 /* This is initialized assuming an amd64 target.
3408 x86_arch_setup will correct it for i386 or amd64 targets. */
3410 struct linux_target_ops the_low_target
=
3413 x86_linux_regs_info
,
3414 x86_cannot_fetch_register
,
3415 x86_cannot_store_register
,
3416 NULL
, /* fetch_register */
3424 x86_supports_z_point_type
,
3427 x86_stopped_by_watchpoint
,
3428 x86_stopped_data_address
,
3429 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3430 native i386 case (no registers smaller than an xfer unit), and are not
3431 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3434 /* need to fix up i386 siginfo if host is amd64 */
3436 x86_linux_new_process
,
3437 x86_linux_new_thread
,
3438 x86_linux_prepare_to_resume
,
3439 x86_linux_process_qsupported
,
3440 x86_supports_tracepoints
,
3441 x86_get_thread_area
,
3442 x86_install_fast_tracepoint_jump_pad
,
3444 x86_get_min_fast_tracepoint_insn_len
,
3445 x86_supports_range_stepping
,
3449 initialize_low_arch (void)
3451 /* Initialize the Linux target descriptions. */
3453 init_registers_amd64_linux ();
3454 init_registers_amd64_avx_linux ();
3455 init_registers_amd64_avx512_linux ();
3456 init_registers_amd64_mpx_linux ();
3458 init_registers_x32_linux ();
3459 init_registers_x32_avx_linux ();
3460 init_registers_x32_avx512_linux ();
3462 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3463 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3464 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3466 init_registers_i386_linux ();
3467 init_registers_i386_mmx_linux ();
3468 init_registers_i386_avx_linux ();
3469 init_registers_i386_avx512_linux ();
3470 init_registers_i386_mpx_linux ();
3472 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3473 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3474 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3476 initialize_regsets_info (&x86_regsets_info
);