1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;
#endif
/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;
/* Target descriptions handed out when the connected GDB does not
   support XML target descriptions; built lazily from the xmltarget_*
   strings below.  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

/* Byte sequences for a rel32 jump (0xe9) and a 16-bit-displacement
   jump (0x66 0xe9); the zeroed displacement is patched in later when
   building jump pads.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  Minimal
   hand-written target descriptions; note the closing </target> tag
   that terminates each document.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

/* Fallback request numbers for kernels/libcs whose headers predate
   these ptrace extensions.  Values match <linux/ptrace.h>.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
149 /* Per-process arch-specific data we want to keep. */
151 struct arch_process_info
153 struct x86_debug_reg_state debug_reg_state
;
158 /* Mapping between the general-purpose registers in `struct user'
159 format and GDB's register array layout.
160 Note that the transfer layout uses 64-bit regs. */
161 static /*const*/ int i386_regmap
[] =
163 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
164 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
165 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
166 DS
* 8, ES
* 8, FS
* 8, GS
* 8
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
171 /* So code below doesn't have to care, i386 or amd64. */
172 #define ORIG_EAX ORIG_RAX
175 static const int x86_64_regmap
[] =
177 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
178 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
179 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
180 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
181 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
182 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
187 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
190 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
191 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1
202 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
203 #define X86_64_USER_REGS (GS + 1)
205 #else /* ! __x86_64__ */
207 /* Mapping between the general-purpose registers in `struct user'
208 format and GDB's register array layout. */
209 static /*const*/ int i386_regmap
[] =
211 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
212 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
213 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
214 DS
* 4, ES
* 4, FS
* 4, GS
* 4
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225 /* Returns true if the current inferior belongs to a x86-64 process,
229 is_64bit_tdesc (void)
231 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
233 return register_size (regcache
->tdesc
, 0) == 8;
239 /* Called by libthread_db. */
242 ps_get_thread_area (const struct ps_prochandle
*ph
,
243 lwpid_t lwpid
, int idx
, void **base
)
246 int use_64bit
= is_64bit_tdesc ();
253 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
268 unsigned int desc
[4];
270 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
271 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
274 /* Ensure we properly extend the value to 64-bits for x86_64. */
275 *base
= (void *) (uintptr_t) desc
[1];
280 /* Get the thread area address. This is used to recognize which
281 thread is which when tracing with the in-process agent library. We
282 don't read anything from the address, and treat it as opaque; it's
283 the address itself that we assume is unique per-thread. */
286 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
289 int use_64bit
= is_64bit_tdesc ();
294 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
296 *addr
= (CORE_ADDR
) (uintptr_t) base
;
305 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
306 struct thread_info
*thr
= get_lwp_thread (lwp
);
307 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
308 unsigned int desc
[4];
310 const int reg_thread_area
= 3; /* bits to scale down register value. */
313 collect_register_by_name (regcache
, "gs", &gs
);
315 idx
= gs
>> reg_thread_area
;
317 if (ptrace (PTRACE_GET_THREAD_AREA
,
319 (void *) (long) idx
, (unsigned long) &desc
) < 0)
330 x86_cannot_store_register (int regno
)
333 if (is_64bit_tdesc ())
337 return regno
>= I386_NUM_REGS
;
341 x86_cannot_fetch_register (int regno
)
344 if (is_64bit_tdesc ())
348 return regno
>= I386_NUM_REGS
;
352 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
357 if (register_size (regcache
->tdesc
, 0) == 8)
359 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
360 if (x86_64_regmap
[i
] != -1)
361 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
365 /* 32-bit inferior registers need to be zero-extended.
366 Callers would read uninitialized memory otherwise. */
367 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
370 for (i
= 0; i
< I386_NUM_REGS
; i
++)
371 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
373 collect_register_by_name (regcache
, "orig_eax",
374 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
378 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
383 if (register_size (regcache
->tdesc
, 0) == 8)
385 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
386 if (x86_64_regmap
[i
] != -1)
387 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
392 for (i
= 0; i
< I386_NUM_REGS
; i
++)
393 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
395 supply_register_by_name (regcache
, "orig_eax",
396 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Copy the FP registers from REGCACHE into BUF: fxsave layout on
   amd64, legacy fsave layout on plain i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Copy the FP registers from BUF into REGCACHE.  Inverse of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
#ifndef __x86_64__
/* FPX (fxsave) regset accessors; only needed on 32-bit hosts, where
   PTRACE_GETFPXREGS is distinct from the plain FP regset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
#endif
/* XSAVE extended-state regset accessors (PTRACE_GETREGSET with
   NT_X86_XSTATE).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
447 /* ??? The non-biarch i386 case stores all the i387 regs twice.
448 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
449 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
450 doesn't work. IWBN to avoid the duplication in the case where it
451 does work. Maybe the arch_setup routine could check whether it works
452 and update the supported regsets accordingly. */
454 static struct regset_info x86_regsets
[] =
456 #ifdef HAVE_PTRACE_GETREGS
457 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
459 x86_fill_gregset
, x86_store_gregset
},
460 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
461 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
463 # ifdef HAVE_PTRACE_GETFPXREGS
464 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
466 x86_fill_fpxregset
, x86_store_fpxregset
},
469 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
471 x86_fill_fpregset
, x86_store_fpregset
},
472 #endif /* HAVE_PTRACE_GETREGS */
473 { 0, 0, 0, -1, -1, NULL
, NULL
}
477 x86_get_pc (struct regcache
*regcache
)
479 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
484 collect_register_by_name (regcache
, "rip", &pc
);
485 return (CORE_ADDR
) pc
;
490 collect_register_by_name (regcache
, "eip", &pc
);
491 return (CORE_ADDR
) pc
;
496 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
498 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
502 unsigned long newpc
= pc
;
503 supply_register_by_name (regcache
, "rip", &newpc
);
507 unsigned int newpc
= pc
;
508 supply_register_by_name (regcache
, "eip", &newpc
);
512 static const unsigned char x86_breakpoint
[] = { 0xCC };
513 #define x86_breakpoint_len 1
516 x86_breakpoint_at (CORE_ADDR pc
)
520 (*the_target
->read_memory
) (pc
, &c
, 1);
/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
539 /* Support for debug registers. */
541 /* Get debug register REGNUM value from the LWP specified by PTID. */
544 x86_linux_dr_get (ptid_t ptid
, int regnum
)
549 gdb_assert (ptid_lwp_p (ptid
));
550 tid
= ptid_get_lwp (ptid
);
553 value
= ptrace (PTRACE_PEEKUSER
, tid
, u_debugreg_offset (regnum
), 0);
555 perror_with_name (_("Couldn't read debug register"));
560 /* Set debug register REGNUM to VALUE in the LWP specified by PTID. */
563 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
567 gdb_assert (ptid_lwp_p (ptid
));
568 tid
= ptid_get_lwp (ptid
);
571 ptrace (PTRACE_POKEUSER
, tid
, u_debugreg_offset (regnum
), value
);
573 perror_with_name (_("Couldn't write debug register"));
/* Callback for iterate_over_lwps.  Mark that our local mirror of
   LWP's debug registers has been changed, and cause LWP to stop if
   it isn't already.  Values are written from our local mirror to
   the actual debug registers immediately prior to LWP resuming.  */

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  lwp_set_debug_registers_changed (lwp, 1);

  /* An LWP's debug registers can only be poked while it is stopped.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  /* Continue the iteration.  */
  return 0;
}
593 /* Store ADDR in debug register REGNUM of all LWPs of the current
597 x86_linux_dr_set_addr (int regnum
, CORE_ADDR addr
)
599 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
601 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
603 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
606 /* Return the address stored in the current inferior's debug register
610 x86_linux_dr_get_addr (int regnum
)
612 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
614 return x86_linux_dr_get (current_lwp_ptid (), regnum
);
617 /* Store CONTROL in the debug control registers of all LWPs of the
621 x86_linux_dr_set_control (unsigned long control
)
623 ptid_t pid_ptid
= pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
625 iterate_over_lwps (pid_ptid
, update_debug_registers_callback
, NULL
);
628 /* Return the value stored in the current inferior's debug control
632 x86_linux_dr_get_control (void)
634 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL
);
637 /* Return the value stored in the current inferior's debug status
641 x86_linux_dr_get_status (void)
643 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS
);
646 /* Low-level function vector. */
647 struct x86_dr_low_type x86_dr_low
=
649 x86_linux_dr_set_control
,
650 x86_linux_dr_set_addr
,
651 x86_linux_dr_get_addr
,
652 x86_linux_dr_get_status
,
653 x86_linux_dr_get_control
,
657 /* Breakpoint/Watchpoint support. */
660 x86_supports_z_point_type (char z_type
)
666 case Z_PACKET_WRITE_WP
:
667 case Z_PACKET_ACCESS_WP
:
675 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
676 int size
, struct raw_breakpoint
*bp
)
678 struct process_info
*proc
= current_process ();
682 case raw_bkpt_type_sw
:
683 return insert_memory_breakpoint (bp
);
685 case raw_bkpt_type_hw
:
686 case raw_bkpt_type_write_wp
:
687 case raw_bkpt_type_access_wp
:
689 enum target_hw_bp_type hw_type
690 = raw_bkpt_type_to_target_hw_bp_type (type
);
691 struct x86_debug_reg_state
*state
692 = &proc
->priv
->arch_private
->debug_reg_state
;
694 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
704 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
705 int size
, struct raw_breakpoint
*bp
)
707 struct process_info
*proc
= current_process ();
711 case raw_bkpt_type_sw
:
712 return remove_memory_breakpoint (bp
);
714 case raw_bkpt_type_hw
:
715 case raw_bkpt_type_write_wp
:
716 case raw_bkpt_type_access_wp
:
718 enum target_hw_bp_type hw_type
719 = raw_bkpt_type_to_target_hw_bp_type (type
);
720 struct x86_debug_reg_state
*state
721 = &proc
->priv
->arch_private
->debug_reg_state
;
723 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
732 x86_stopped_by_watchpoint (void)
734 struct process_info
*proc
= current_process ();
735 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
739 x86_stopped_data_address (void)
741 struct process_info
*proc
= current_process ();
743 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
749 /* Called when a new process is created. */
751 static struct arch_process_info
*
752 x86_linux_new_process (void)
754 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
756 x86_low_init_dregs (&info
->debug_reg_state
);
/* Called when a new thread is detected.  Mark its debug registers as
   needing an update so the process-wide mirror is written to it
   before it first resumes.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  lwp_set_debug_registers_changed (lwp, 1);
}
769 /* See nat/x86-dregs.h. */
771 struct x86_debug_reg_state
*
772 x86_debug_reg_state (pid_t pid
)
774 struct process_info
*proc
= find_process_pid (pid
);
776 return &proc
->priv
->arch_private
->debug_reg_state
;
779 /* Update the thread's debug registers if the values in our local
780 mirror have been changed. */
783 x86_linux_update_debug_registers (struct lwp_info
*lwp
)
785 ptid_t ptid
= ptid_of_lwp (lwp
);
786 int clear_status
= 0;
788 gdb_assert (lwp_is_stopped (lwp
));
790 if (lwp_debug_registers_changed (lwp
))
792 struct x86_debug_reg_state
*state
793 = x86_debug_reg_state (ptid_get_pid (ptid
));
796 /* Prior to Linux kernel 2.6.33 commit
797 72f674d203cd230426437cdcf7dd6f681dad8b0d, setting DR0-3 to
798 a value that did not match what was enabled in DR_CONTROL
799 resulted in EINVAL. To avoid this we zero DR_CONTROL before
800 writing address registers, only writing DR_CONTROL's actual
801 value once all the addresses are in place. */
802 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
804 ALL_DEBUG_ADDRESS_REGISTERS (i
)
805 if (state
->dr_ref_count
[i
] > 0)
807 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
809 /* If we're setting a watchpoint, any change the inferior
810 has made to its debug registers needs to be discarded
811 to avoid x86_stopped_data_address getting confused. */
815 /* If DR_CONTROL is supposed to be zero then it's already set. */
816 if (state
->dr_control_mirror
!= 0)
817 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
819 lwp_set_debug_registers_changed (lwp
, 0);
823 || lwp_stop_reason (lwp
) == TARGET_STOPPED_BY_WATCHPOINT
)
824 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
/* Called prior to resuming a thread.  Flush any pending
   debug-register changes to the hardware before the thread runs.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  x86_linux_update_debug_registers (lwp);
}
835 /* When GDBSERVER is built as a 64-bit application on linux, the
836 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
837 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
838 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
839 conversion in-place ourselves. */
841 /* These types below (compat_*) define a siginfo type that is layout
842 compatible with the siginfo type exported by the 32-bit userspace
847 typedef int compat_int_t
;
848 typedef unsigned int compat_uptr_t
;
850 typedef int compat_time_t
;
851 typedef int compat_timer_t
;
852 typedef int compat_clock_t
;
854 struct compat_timeval
856 compat_time_t tv_sec
;
860 typedef union compat_sigval
862 compat_int_t sival_int
;
863 compat_uptr_t sival_ptr
;
866 typedef struct compat_siginfo
874 int _pad
[((128 / sizeof (int)) - 3)];
883 /* POSIX.1b timers */
888 compat_sigval_t _sigval
;
891 /* POSIX.1b signals */
896 compat_sigval_t _sigval
;
905 compat_clock_t _utime
;
906 compat_clock_t _stime
;
909 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
924 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
925 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
927 typedef struct compat_x32_siginfo
935 int _pad
[((128 / sizeof (int)) - 3)];
944 /* POSIX.1b timers */
949 compat_sigval_t _sigval
;
952 /* POSIX.1b signals */
957 compat_sigval_t _sigval
;
966 compat_x32_clock_t _utime
;
967 compat_x32_clock_t _stime
;
970 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
983 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
985 #define cpt_si_pid _sifields._kill._pid
986 #define cpt_si_uid _sifields._kill._uid
987 #define cpt_si_timerid _sifields._timer._tid
988 #define cpt_si_overrun _sifields._timer._overrun
989 #define cpt_si_status _sifields._sigchld._status
990 #define cpt_si_utime _sifields._sigchld._utime
991 #define cpt_si_stime _sifields._sigchld._stime
992 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
993 #define cpt_si_addr _sifields._sigfault._addr
994 #define cpt_si_band _sifields._sigpoll._band
995 #define cpt_si_fd _sifields._sigpoll._fd
997 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
998 In their place is si_timer1,si_timer2. */
1000 #define si_timerid si_timer1
1003 #define si_overrun si_timer2
1007 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
1009 memset (to
, 0, sizeof (*to
));
1011 to
->si_signo
= from
->si_signo
;
1012 to
->si_errno
= from
->si_errno
;
1013 to
->si_code
= from
->si_code
;
1015 if (to
->si_code
== SI_TIMER
)
1017 to
->cpt_si_timerid
= from
->si_timerid
;
1018 to
->cpt_si_overrun
= from
->si_overrun
;
1019 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1021 else if (to
->si_code
== SI_USER
)
1023 to
->cpt_si_pid
= from
->si_pid
;
1024 to
->cpt_si_uid
= from
->si_uid
;
1026 else if (to
->si_code
< 0)
1028 to
->cpt_si_pid
= from
->si_pid
;
1029 to
->cpt_si_uid
= from
->si_uid
;
1030 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1034 switch (to
->si_signo
)
1037 to
->cpt_si_pid
= from
->si_pid
;
1038 to
->cpt_si_uid
= from
->si_uid
;
1039 to
->cpt_si_status
= from
->si_status
;
1040 to
->cpt_si_utime
= from
->si_utime
;
1041 to
->cpt_si_stime
= from
->si_stime
;
1047 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1050 to
->cpt_si_band
= from
->si_band
;
1051 to
->cpt_si_fd
= from
->si_fd
;
1054 to
->cpt_si_pid
= from
->si_pid
;
1055 to
->cpt_si_uid
= from
->si_uid
;
1056 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1063 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1065 memset (to
, 0, sizeof (*to
));
1067 to
->si_signo
= from
->si_signo
;
1068 to
->si_errno
= from
->si_errno
;
1069 to
->si_code
= from
->si_code
;
1071 if (to
->si_code
== SI_TIMER
)
1073 to
->si_timerid
= from
->cpt_si_timerid
;
1074 to
->si_overrun
= from
->cpt_si_overrun
;
1075 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1077 else if (to
->si_code
== SI_USER
)
1079 to
->si_pid
= from
->cpt_si_pid
;
1080 to
->si_uid
= from
->cpt_si_uid
;
1082 else if (to
->si_code
< 0)
1084 to
->si_pid
= from
->cpt_si_pid
;
1085 to
->si_uid
= from
->cpt_si_uid
;
1086 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1090 switch (to
->si_signo
)
1093 to
->si_pid
= from
->cpt_si_pid
;
1094 to
->si_uid
= from
->cpt_si_uid
;
1095 to
->si_status
= from
->cpt_si_status
;
1096 to
->si_utime
= from
->cpt_si_utime
;
1097 to
->si_stime
= from
->cpt_si_stime
;
1103 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1106 to
->si_band
= from
->cpt_si_band
;
1107 to
->si_fd
= from
->cpt_si_fd
;
1110 to
->si_pid
= from
->cpt_si_pid
;
1111 to
->si_uid
= from
->cpt_si_uid
;
1112 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1119 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1122 memset (to
, 0, sizeof (*to
));
1124 to
->si_signo
= from
->si_signo
;
1125 to
->si_errno
= from
->si_errno
;
1126 to
->si_code
= from
->si_code
;
1128 if (to
->si_code
== SI_TIMER
)
1130 to
->cpt_si_timerid
= from
->si_timerid
;
1131 to
->cpt_si_overrun
= from
->si_overrun
;
1132 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1134 else if (to
->si_code
== SI_USER
)
1136 to
->cpt_si_pid
= from
->si_pid
;
1137 to
->cpt_si_uid
= from
->si_uid
;
1139 else if (to
->si_code
< 0)
1141 to
->cpt_si_pid
= from
->si_pid
;
1142 to
->cpt_si_uid
= from
->si_uid
;
1143 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1147 switch (to
->si_signo
)
1150 to
->cpt_si_pid
= from
->si_pid
;
1151 to
->cpt_si_uid
= from
->si_uid
;
1152 to
->cpt_si_status
= from
->si_status
;
1153 to
->cpt_si_utime
= from
->si_utime
;
1154 to
->cpt_si_stime
= from
->si_stime
;
1160 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1163 to
->cpt_si_band
= from
->si_band
;
1164 to
->cpt_si_fd
= from
->si_fd
;
1167 to
->cpt_si_pid
= from
->si_pid
;
1168 to
->cpt_si_uid
= from
->si_uid
;
1169 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1176 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1177 compat_x32_siginfo_t
*from
)
1179 memset (to
, 0, sizeof (*to
));
1181 to
->si_signo
= from
->si_signo
;
1182 to
->si_errno
= from
->si_errno
;
1183 to
->si_code
= from
->si_code
;
1185 if (to
->si_code
== SI_TIMER
)
1187 to
->si_timerid
= from
->cpt_si_timerid
;
1188 to
->si_overrun
= from
->cpt_si_overrun
;
1189 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1191 else if (to
->si_code
== SI_USER
)
1193 to
->si_pid
= from
->cpt_si_pid
;
1194 to
->si_uid
= from
->cpt_si_uid
;
1196 else if (to
->si_code
< 0)
1198 to
->si_pid
= from
->cpt_si_pid
;
1199 to
->si_uid
= from
->cpt_si_uid
;
1200 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1204 switch (to
->si_signo
)
1207 to
->si_pid
= from
->cpt_si_pid
;
1208 to
->si_uid
= from
->cpt_si_uid
;
1209 to
->si_status
= from
->cpt_si_status
;
1210 to
->si_utime
= from
->cpt_si_utime
;
1211 to
->si_stime
= from
->cpt_si_stime
;
1217 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1220 to
->si_band
= from
->cpt_si_band
;
1221 to
->si_fd
= from
->cpt_si_fd
;
1224 to
->si_pid
= from
->cpt_si_pid
;
1225 to
->si_uid
= from
->cpt_si_uid
;
1226 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1232 #endif /* __x86_64__ */
1234 /* Convert a native/host siginfo object, into/from the siginfo in the
1235 layout of the inferiors' architecture. Returns true if any
1236 conversion was done; false otherwise. If DIRECTION is 1, then copy
1237 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1241 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1244 unsigned int machine
;
1245 int tid
= lwpid_of (current_thread
);
1246 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1248 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1249 if (!is_64bit_tdesc ())
1251 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1254 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1256 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1260 /* No fixup for native x32 GDB. */
1261 else if (!is_elf64
&& sizeof (void *) == 8)
1263 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1266 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1269 siginfo_from_compat_x32_siginfo (native
,
1270 (struct compat_x32_siginfo
*) inf
);
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet".  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means "not
   probed yet".  */
static int have_ptrace_getregset = -1;
1316 /* Get Linux/x86 target description from running target. */
1318 static const struct target_desc
*
1319 x86_linux_read_description (void)
1321 unsigned int machine
;
1325 static uint64_t xcr0
;
1326 struct regset_info
*regset
;
1328 tid
= lwpid_of (current_thread
);
1330 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1332 if (sizeof (void *) == 4)
1335 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1337 else if (machine
== EM_X86_64
)
1338 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1342 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1343 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1345 elf_fpxregset_t fpxregs
;
1347 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1349 have_ptrace_getfpxregs
= 0;
1350 have_ptrace_getregset
= 0;
1351 return tdesc_i386_mmx_linux
;
1354 have_ptrace_getfpxregs
= 1;
1360 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1362 /* Don't use XML. */
1364 if (machine
== EM_X86_64
)
1365 return tdesc_amd64_linux_no_xml
;
1368 return tdesc_i386_linux_no_xml
;
1371 if (have_ptrace_getregset
== -1)
1373 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1376 iov
.iov_base
= xstateregs
;
1377 iov
.iov_len
= sizeof (xstateregs
);
1379 /* Check if PTRACE_GETREGSET works. */
1380 if (ptrace (PTRACE_GETREGSET
, tid
,
1381 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1382 have_ptrace_getregset
= 0;
1385 have_ptrace_getregset
= 1;
1387 /* Get XCR0 from XSAVE extended state. */
1388 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1389 / sizeof (uint64_t))];
1391 /* Use PTRACE_GETREGSET if it is available. */
1392 for (regset
= x86_regsets
;
1393 regset
->fill_function
!= NULL
; regset
++)
1394 if (regset
->get_request
== PTRACE_GETREGSET
)
1395 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1396 else if (regset
->type
!= GENERAL_REGS
)
1401 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1402 xcr0_features
= (have_ptrace_getregset
1403 && (xcr0
& X86_XSTATE_ALL_MASK
));
1408 if (machine
== EM_X86_64
)
1415 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1417 case X86_XSTATE_AVX512_MASK
:
1418 return tdesc_amd64_avx512_linux
;
1420 case X86_XSTATE_MPX_MASK
:
1421 return tdesc_amd64_mpx_linux
;
1423 case X86_XSTATE_AVX_MASK
:
1424 return tdesc_amd64_avx_linux
;
1427 return tdesc_amd64_linux
;
1431 return tdesc_amd64_linux
;
1437 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1439 case X86_XSTATE_AVX512_MASK
:
1440 return tdesc_x32_avx512_linux
;
1442 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1443 case X86_XSTATE_AVX_MASK
:
1444 return tdesc_x32_avx_linux
;
1447 return tdesc_x32_linux
;
1451 return tdesc_x32_linux
;
1459 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1461 case (X86_XSTATE_AVX512_MASK
):
1462 return tdesc_i386_avx512_linux
;
1464 case (X86_XSTATE_MPX_MASK
):
1465 return tdesc_i386_mpx_linux
;
1467 case (X86_XSTATE_AVX_MASK
):
1468 return tdesc_i386_avx_linux
;
1471 return tdesc_i386_linux
;
1475 return tdesc_i386_linux
;
1478 gdb_assert_not_reached ("failed to return tdesc");
1481 /* Callback for find_inferior. Stops iteration when a thread with a
1482 given PID is found. */
1485 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1487 int pid
= *(int *) data
;
1489 return (ptid_get_pid (entry
->id
) == pid
);
1492 /* Callback for for_each_inferior. Calls the arch_setup routine for
1496 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1498 int pid
= ptid_get_pid (entry
->id
);
1500 /* Look up any thread of this processes. */
1502 = (struct thread_info
*) find_inferior (&all_threads
,
1503 same_process_callback
, &pid
);
1505 the_low_target
.arch_setup ();
1508 /* Update all the target description of all processes; a new GDB
1509 connected, and it may or not support xml target descriptions. */
1512 x86_linux_update_xmltarget (void)
1514 struct thread_info
*saved_thread
= current_thread
;
1516 /* Before changing the register cache's internal layout, flush the
1517 contents of the current valid caches back to the threads, and
1518 release the current regcache objects. */
1519 regcache_release ();
1521 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1523 current_thread
= saved_thread
;
1526 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1527 PTRACE_GETREGSET. */
1530 x86_linux_process_qsupported (const char *query
)
1532 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1533 with "i386" in qSupported query, it supports x86 XML target
1536 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1538 char *copy
= xstrdup (query
+ 13);
1541 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1543 if (strcmp (p
, "i386") == 0)
1553 x86_linux_update_xmltarget ();
1556 /* Common for x86/x86-64. */
1558 static struct regsets_info x86_regsets_info
=
1560 x86_regsets
, /* regsets */
1561 0, /* num_regsets */
1562 NULL
, /* disabled_regsets */
1566 static struct regs_info amd64_linux_regs_info
=
1568 NULL
, /* regset_bitmap */
1569 NULL
, /* usrregs_info */
1573 static struct usrregs_info i386_linux_usrregs_info
=
1579 static struct regs_info i386_linux_regs_info
=
1581 NULL
, /* regset_bitmap */
1582 &i386_linux_usrregs_info
,
1586 const struct regs_info
*
1587 x86_linux_regs_info (void)
1590 if (is_64bit_tdesc ())
1591 return &amd64_linux_regs_info
;
1594 return &i386_linux_regs_info
;
1597 /* Initialize the target description for the architecture of the
1601 x86_arch_setup (void)
1603 current_process ()->tdesc
= x86_linux_read_description ();
/* Fast tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1613 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1615 write_inferior_memory (*to
, buf
, len
);
/* Parse OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), store the bytes into BUF, and return how many
   bytes were stored.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* strtoul consumed nothing: no more hex digits left.  */
      if (end == op)
	break;

      *dst++ = (unsigned char) byte;
      op = end;
    }

  return dst - buf;
}
1641 /* Build a jump pad that saves registers and calls a collection
1642 function. Writes a jump instruction to the jump pad to
1643 JJUMPAD_INSN. The caller is responsible to write it in at the
1644 tracepoint address. */
1647 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1648 CORE_ADDR collector
,
1651 CORE_ADDR
*jump_entry
,
1652 CORE_ADDR
*trampoline
,
1653 ULONGEST
*trampoline_size
,
1654 unsigned char *jjump_pad_insn
,
1655 ULONGEST
*jjump_pad_insn_size
,
1656 CORE_ADDR
*adjusted_insn_addr
,
1657 CORE_ADDR
*adjusted_insn_addr_end
,
1660 unsigned char buf
[40];
1664 CORE_ADDR buildaddr
= *jump_entry
;
1666 /* Build the jump pad. */
1668 /* First, do tracepoint data collection. Save registers. */
1670 /* Need to ensure stack pointer saved first. */
1671 buf
[i
++] = 0x54; /* push %rsp */
1672 buf
[i
++] = 0x55; /* push %rbp */
1673 buf
[i
++] = 0x57; /* push %rdi */
1674 buf
[i
++] = 0x56; /* push %rsi */
1675 buf
[i
++] = 0x52; /* push %rdx */
1676 buf
[i
++] = 0x51; /* push %rcx */
1677 buf
[i
++] = 0x53; /* push %rbx */
1678 buf
[i
++] = 0x50; /* push %rax */
1679 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1680 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1681 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1682 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1683 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1684 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1685 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1686 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1687 buf
[i
++] = 0x9c; /* pushfq */
1688 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1690 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1691 i
+= sizeof (unsigned long);
1692 buf
[i
++] = 0x57; /* push %rdi */
1693 append_insns (&buildaddr
, i
, buf
);
1695 /* Stack space for the collecting_t object. */
1697 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1698 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1699 memcpy (buf
+ i
, &tpoint
, 8);
1701 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1702 i
+= push_opcode (&buf
[i
],
1703 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1704 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1705 append_insns (&buildaddr
, i
, buf
);
1709 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1710 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1712 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1713 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1714 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1715 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1716 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1717 append_insns (&buildaddr
, i
, buf
);
1719 /* Set up the gdb_collect call. */
1720 /* At this point, (stack pointer + 0x18) is the base of our saved
1724 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1725 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1727 /* tpoint address may be 64-bit wide. */
1728 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1729 memcpy (buf
+ i
, &tpoint
, 8);
1731 append_insns (&buildaddr
, i
, buf
);
1733 /* The collector function being in the shared library, may be
1734 >31-bits away off the jump pad. */
1736 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1737 memcpy (buf
+ i
, &collector
, 8);
1739 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1740 append_insns (&buildaddr
, i
, buf
);
1742 /* Clear the spin-lock. */
1744 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1745 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1746 memcpy (buf
+ i
, &lockaddr
, 8);
1748 append_insns (&buildaddr
, i
, buf
);
1750 /* Remove stack that had been used for the collect_t object. */
1752 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1753 append_insns (&buildaddr
, i
, buf
);
1755 /* Restore register state. */
1757 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1761 buf
[i
++] = 0x9d; /* popfq */
1762 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1763 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1764 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1765 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1766 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1767 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1768 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1769 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1770 buf
[i
++] = 0x58; /* pop %rax */
1771 buf
[i
++] = 0x5b; /* pop %rbx */
1772 buf
[i
++] = 0x59; /* pop %rcx */
1773 buf
[i
++] = 0x5a; /* pop %rdx */
1774 buf
[i
++] = 0x5e; /* pop %rsi */
1775 buf
[i
++] = 0x5f; /* pop %rdi */
1776 buf
[i
++] = 0x5d; /* pop %rbp */
1777 buf
[i
++] = 0x5c; /* pop %rsp */
1778 append_insns (&buildaddr
, i
, buf
);
1780 /* Now, adjust the original instruction to execute in the jump
1782 *adjusted_insn_addr
= buildaddr
;
1783 relocate_instruction (&buildaddr
, tpaddr
);
1784 *adjusted_insn_addr_end
= buildaddr
;
1786 /* Finally, write a jump back to the program. */
1788 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1789 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1792 "E.Jump back from jump pad too far from tracepoint "
1793 "(offset 0x%" PRIx64
" > int32).", loffset
);
1797 offset
= (int) loffset
;
1798 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1799 memcpy (buf
+ 1, &offset
, 4);
1800 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1802 /* The jump pad is now built. Wire in a jump to our jump pad. This
1803 is always done last (by our caller actually), so that we can
1804 install fast tracepoints with threads running. This relies on
1805 the agent's atomic write support. */
1806 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1807 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1810 "E.Jump pad too far from tracepoint "
1811 "(offset 0x%" PRIx64
" > int32).", loffset
);
1815 offset
= (int) loffset
;
1817 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1818 memcpy (buf
+ 1, &offset
, 4);
1819 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1820 *jjump_pad_insn_size
= sizeof (jump_insn
);
1822 /* Return the end address of our pad. */
1823 *jump_entry
= buildaddr
;
1828 #endif /* __x86_64__ */
1830 /* Build a jump pad that saves registers and calls a collection
1831 function. Writes a jump instruction to the jump pad to
1832 JJUMPAD_INSN. The caller is responsible to write it in at the
1833 tracepoint address. */
1836 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1837 CORE_ADDR collector
,
1840 CORE_ADDR
*jump_entry
,
1841 CORE_ADDR
*trampoline
,
1842 ULONGEST
*trampoline_size
,
1843 unsigned char *jjump_pad_insn
,
1844 ULONGEST
*jjump_pad_insn_size
,
1845 CORE_ADDR
*adjusted_insn_addr
,
1846 CORE_ADDR
*adjusted_insn_addr_end
,
1849 unsigned char buf
[0x100];
1851 CORE_ADDR buildaddr
= *jump_entry
;
1853 /* Build the jump pad. */
1855 /* First, do tracepoint data collection. Save registers. */
1857 buf
[i
++] = 0x60; /* pushad */
1858 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1859 *((int *)(buf
+ i
)) = (int) tpaddr
;
1861 buf
[i
++] = 0x9c; /* pushf */
1862 buf
[i
++] = 0x1e; /* push %ds */
1863 buf
[i
++] = 0x06; /* push %es */
1864 buf
[i
++] = 0x0f; /* push %fs */
1866 buf
[i
++] = 0x0f; /* push %gs */
1868 buf
[i
++] = 0x16; /* push %ss */
1869 buf
[i
++] = 0x0e; /* push %cs */
1870 append_insns (&buildaddr
, i
, buf
);
1872 /* Stack space for the collecting_t object. */
1874 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1876 /* Build the object. */
1877 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1878 memcpy (buf
+ i
, &tpoint
, 4);
1880 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1882 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1883 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1884 append_insns (&buildaddr
, i
, buf
);
1886 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1887 If we cared for it, this could be using xchg alternatively. */
1890 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1891 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1893 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1895 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1896 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1897 append_insns (&buildaddr
, i
, buf
);
1900 /* Set up arguments to the gdb_collect call. */
1902 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1903 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1904 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1905 append_insns (&buildaddr
, i
, buf
);
1908 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1909 append_insns (&buildaddr
, i
, buf
);
1912 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1913 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1915 append_insns (&buildaddr
, i
, buf
);
1917 buf
[0] = 0xe8; /* call <reladdr> */
1918 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1919 memcpy (buf
+ 1, &offset
, 4);
1920 append_insns (&buildaddr
, 5, buf
);
1921 /* Clean up after the call. */
1922 buf
[0] = 0x83; /* add $0x8,%esp */
1925 append_insns (&buildaddr
, 3, buf
);
1928 /* Clear the spin-lock. This would need the LOCK prefix on older
1931 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1932 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1933 memcpy (buf
+ i
, &lockaddr
, 4);
1935 append_insns (&buildaddr
, i
, buf
);
1938 /* Remove stack that had been used for the collect_t object. */
1940 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1941 append_insns (&buildaddr
, i
, buf
);
1944 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1947 buf
[i
++] = 0x17; /* pop %ss */
1948 buf
[i
++] = 0x0f; /* pop %gs */
1950 buf
[i
++] = 0x0f; /* pop %fs */
1952 buf
[i
++] = 0x07; /* pop %es */
1953 buf
[i
++] = 0x1f; /* pop %ds */
1954 buf
[i
++] = 0x9d; /* popf */
1955 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1958 buf
[i
++] = 0x61; /* popad */
1959 append_insns (&buildaddr
, i
, buf
);
1961 /* Now, adjust the original instruction to execute in the jump
1963 *adjusted_insn_addr
= buildaddr
;
1964 relocate_instruction (&buildaddr
, tpaddr
);
1965 *adjusted_insn_addr_end
= buildaddr
;
1967 /* Write the jump back to the program. */
1968 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1969 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1970 memcpy (buf
+ 1, &offset
, 4);
1971 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1973 /* The jump pad is now built. Wire in a jump to our jump pad. This
1974 is always done last (by our caller actually), so that we can
1975 install fast tracepoints with threads running. This relies on
1976 the agent's atomic write support. */
1979 /* Create a trampoline. */
1980 *trampoline_size
= sizeof (jump_insn
);
1981 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1983 /* No trampoline space available. */
1985 "E.Cannot allocate trampoline space needed for fast "
1986 "tracepoints on 4-byte instructions.");
1990 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1991 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1992 memcpy (buf
+ 1, &offset
, 4);
1993 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1995 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1996 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1997 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1998 memcpy (buf
+ 2, &offset
, 2);
1999 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
2000 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
2004 /* Else use a 32-bit relative jump instruction. */
2005 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
2006 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
2007 memcpy (buf
+ 1, &offset
, 4);
2008 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
2009 *jjump_pad_insn_size
= sizeof (jump_insn
);
2012 /* Return the end address of our pad. */
2013 *jump_entry
= buildaddr
;
2019 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2020 CORE_ADDR collector
,
2023 CORE_ADDR
*jump_entry
,
2024 CORE_ADDR
*trampoline
,
2025 ULONGEST
*trampoline_size
,
2026 unsigned char *jjump_pad_insn
,
2027 ULONGEST
*jjump_pad_insn_size
,
2028 CORE_ADDR
*adjusted_insn_addr
,
2029 CORE_ADDR
*adjusted_insn_addr_end
,
2033 if (is_64bit_tdesc ())
2034 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2035 collector
, lockaddr
,
2036 orig_size
, jump_entry
,
2037 trampoline
, trampoline_size
,
2039 jjump_pad_insn_size
,
2041 adjusted_insn_addr_end
,
2045 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2046 collector
, lockaddr
,
2047 orig_size
, jump_entry
,
2048 trampoline
, trampoline_size
,
2050 jjump_pad_insn_size
,
2052 adjusted_insn_addr_end
,
2056 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2060 x86_get_min_fast_tracepoint_insn_len (void)
2062 static int warned_about_fast_tracepoints
= 0;
2065 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2066 used for fast tracepoints. */
2067 if (is_64bit_tdesc ())
2071 if (agent_loaded_p ())
2073 char errbuf
[IPA_BUFSIZ
];
2077 /* On x86, if trampolines are available, then 4-byte jump instructions
2078 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2079 with a 4-byte offset are used instead. */
2080 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2084 /* GDB has no channel to explain to user why a shorter fast
2085 tracepoint is not possible, but at least make GDBserver
2086 mention that something has gone awry. */
2087 if (!warned_about_fast_tracepoints
)
2089 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2090 warned_about_fast_tracepoints
= 1;
2097 /* Indicate that the minimum length is currently unknown since the IPA
2098 has not loaded yet. */
2104 add_insns (unsigned char *start
, int len
)
2106 CORE_ADDR buildaddr
= current_insn_ptr
;
2109 debug_printf ("Adding %d bytes of insn at %s\n",
2110 len
, paddress (buildaddr
));
2112 append_insns (&buildaddr
, len
, start
);
2113 current_insn_ptr
= buildaddr
;
2116 /* Our general strategy for emitting code is to avoid specifying raw
2117 bytes whenever possible, and instead copy a block of inline asm
2118 that is embedded in the function. This is a little messy, because
2119 we need to keep the compiler from discarding what looks like dead
2120 code, plus suppress various warnings. */
2122 #define EMIT_ASM(NAME, INSNS) \
2125 extern unsigned char start_ ## NAME, end_ ## NAME; \
2126 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2127 __asm__ ("jmp end_" #NAME "\n" \
2128 "\t" "start_" #NAME ":" \
2130 "\t" "end_" #NAME ":"); \
2135 #define EMIT_ASM32(NAME,INSNS) \
2138 extern unsigned char start_ ## NAME, end_ ## NAME; \
2139 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2140 __asm__ (".code32\n" \
2141 "\t" "jmp end_" #NAME "\n" \
2142 "\t" "start_" #NAME ":\n" \
2144 "\t" "end_" #NAME ":\n" \
2150 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2157 amd64_emit_prologue (void)
2159 EMIT_ASM (amd64_prologue
,
2161 "movq %rsp,%rbp\n\t"
2162 "sub $0x20,%rsp\n\t"
2163 "movq %rdi,-8(%rbp)\n\t"
2164 "movq %rsi,-16(%rbp)");
2169 amd64_emit_epilogue (void)
2171 EMIT_ASM (amd64_epilogue
,
2172 "movq -16(%rbp),%rdi\n\t"
2173 "movq %rax,(%rdi)\n\t"
2180 amd64_emit_add (void)
2182 EMIT_ASM (amd64_add
,
2183 "add (%rsp),%rax\n\t"
2184 "lea 0x8(%rsp),%rsp");
2188 amd64_emit_sub (void)
2190 EMIT_ASM (amd64_sub
,
2191 "sub %rax,(%rsp)\n\t"
2196 amd64_emit_mul (void)
2202 amd64_emit_lsh (void)
2208 amd64_emit_rsh_signed (void)
2214 amd64_emit_rsh_unsigned (void)
2220 amd64_emit_ext (int arg
)
2225 EMIT_ASM (amd64_ext_8
,
2231 EMIT_ASM (amd64_ext_16
,
2236 EMIT_ASM (amd64_ext_32
,
2245 amd64_emit_log_not (void)
2247 EMIT_ASM (amd64_log_not
,
2248 "test %rax,%rax\n\t"
2254 amd64_emit_bit_and (void)
2256 EMIT_ASM (amd64_and
,
2257 "and (%rsp),%rax\n\t"
2258 "lea 0x8(%rsp),%rsp");
2262 amd64_emit_bit_or (void)
2265 "or (%rsp),%rax\n\t"
2266 "lea 0x8(%rsp),%rsp");
2270 amd64_emit_bit_xor (void)
2272 EMIT_ASM (amd64_xor
,
2273 "xor (%rsp),%rax\n\t"
2274 "lea 0x8(%rsp),%rsp");
2278 amd64_emit_bit_not (void)
2280 EMIT_ASM (amd64_bit_not
,
2281 "xorq $0xffffffffffffffff,%rax");
2285 amd64_emit_equal (void)
2287 EMIT_ASM (amd64_equal
,
2288 "cmp %rax,(%rsp)\n\t"
2289 "je .Lamd64_equal_true\n\t"
2291 "jmp .Lamd64_equal_end\n\t"
2292 ".Lamd64_equal_true:\n\t"
2294 ".Lamd64_equal_end:\n\t"
2295 "lea 0x8(%rsp),%rsp");
2299 amd64_emit_less_signed (void)
2301 EMIT_ASM (amd64_less_signed
,
2302 "cmp %rax,(%rsp)\n\t"
2303 "jl .Lamd64_less_signed_true\n\t"
2305 "jmp .Lamd64_less_signed_end\n\t"
2306 ".Lamd64_less_signed_true:\n\t"
2308 ".Lamd64_less_signed_end:\n\t"
2309 "lea 0x8(%rsp),%rsp");
2313 amd64_emit_less_unsigned (void)
2315 EMIT_ASM (amd64_less_unsigned
,
2316 "cmp %rax,(%rsp)\n\t"
2317 "jb .Lamd64_less_unsigned_true\n\t"
2319 "jmp .Lamd64_less_unsigned_end\n\t"
2320 ".Lamd64_less_unsigned_true:\n\t"
2322 ".Lamd64_less_unsigned_end:\n\t"
2323 "lea 0x8(%rsp),%rsp");
2327 amd64_emit_ref (int size
)
2332 EMIT_ASM (amd64_ref1
,
2336 EMIT_ASM (amd64_ref2
,
2340 EMIT_ASM (amd64_ref4
,
2341 "movl (%rax),%eax");
2344 EMIT_ASM (amd64_ref8
,
2345 "movq (%rax),%rax");
2351 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2353 EMIT_ASM (amd64_if_goto
,
2357 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2365 amd64_emit_goto (int *offset_p
, int *size_p
)
2367 EMIT_ASM (amd64_goto
,
2368 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2376 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2378 int diff
= (to
- (from
+ size
));
2379 unsigned char buf
[sizeof (int)];
2387 memcpy (buf
, &diff
, sizeof (int));
2388 write_inferior_memory (from
, buf
, sizeof (int));
2392 amd64_emit_const (LONGEST num
)
2394 unsigned char buf
[16];
2396 CORE_ADDR buildaddr
= current_insn_ptr
;
2399 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2400 memcpy (&buf
[i
], &num
, sizeof (num
));
2402 append_insns (&buildaddr
, i
, buf
);
2403 current_insn_ptr
= buildaddr
;
2407 amd64_emit_call (CORE_ADDR fn
)
2409 unsigned char buf
[16];
2411 CORE_ADDR buildaddr
;
2414 /* The destination function being in the shared library, may be
2415 >31-bits away off the compiled code pad. */
2417 buildaddr
= current_insn_ptr
;
2419 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2423 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2425 /* Offset is too large for a call. Use callq, but that requires
2426 a register, so avoid it if possible. Use r10, since it is
2427 call-clobbered, we don't have to push/pop it. */
2428 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2430 memcpy (buf
+ i
, &fn
, 8);
2432 buf
[i
++] = 0xff; /* callq *%r10 */
2437 int offset32
= offset64
; /* we know we can't overflow here. */
2438 memcpy (buf
+ i
, &offset32
, 4);
2442 append_insns (&buildaddr
, i
, buf
);
2443 current_insn_ptr
= buildaddr
;
2447 amd64_emit_reg (int reg
)
2449 unsigned char buf
[16];
2451 CORE_ADDR buildaddr
;
2453 /* Assume raw_regs is still in %rdi. */
2454 buildaddr
= current_insn_ptr
;
2456 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2457 memcpy (&buf
[i
], ®
, sizeof (reg
));
2459 append_insns (&buildaddr
, i
, buf
);
2460 current_insn_ptr
= buildaddr
;
2461 amd64_emit_call (get_raw_reg_func_addr ());
2465 amd64_emit_pop (void)
2467 EMIT_ASM (amd64_pop
,
2472 amd64_emit_stack_flush (void)
2474 EMIT_ASM (amd64_stack_flush
,
2479 amd64_emit_zero_ext (int arg
)
2484 EMIT_ASM (amd64_zero_ext_8
,
2488 EMIT_ASM (amd64_zero_ext_16
,
2489 "and $0xffff,%rax");
2492 EMIT_ASM (amd64_zero_ext_32
,
2493 "mov $0xffffffff,%rcx\n\t"
2502 amd64_emit_swap (void)
2504 EMIT_ASM (amd64_swap
,
2511 amd64_emit_stack_adjust (int n
)
2513 unsigned char buf
[16];
2515 CORE_ADDR buildaddr
= current_insn_ptr
;
2518 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2522 /* This only handles adjustments up to 16, but we don't expect any more. */
2524 append_insns (&buildaddr
, i
, buf
);
2525 current_insn_ptr
= buildaddr
;
2528 /* FN's prototype is `LONGEST(*fn)(int)'. */
2531 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2533 unsigned char buf
[16];
2535 CORE_ADDR buildaddr
;
2537 buildaddr
= current_insn_ptr
;
2539 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2540 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2542 append_insns (&buildaddr
, i
, buf
);
2543 current_insn_ptr
= buildaddr
;
2544 amd64_emit_call (fn
);
2547 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2550 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2552 unsigned char buf
[16];
2554 CORE_ADDR buildaddr
;
2556 buildaddr
= current_insn_ptr
;
2558 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2559 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2561 append_insns (&buildaddr
, i
, buf
);
2562 current_insn_ptr
= buildaddr
;
2563 EMIT_ASM (amd64_void_call_2_a
,
2564 /* Save away a copy of the stack top. */
2566 /* Also pass top as the second argument. */
2568 amd64_emit_call (fn
);
2569 EMIT_ASM (amd64_void_call_2_b
,
2570 /* Restore the stack top, %rax may have been trashed. */
2575 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2578 "cmp %rax,(%rsp)\n\t"
2579 "jne .Lamd64_eq_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_eq_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2595 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2598 "cmp %rax,(%rsp)\n\t"
2599 "je .Lamd64_ne_fallthru\n\t"
2600 "lea 0x8(%rsp),%rsp\n\t"
2602 /* jmp, but don't trust the assembler to choose the right jump */
2603 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2604 ".Lamd64_ne_fallthru:\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2615 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2618 "cmp %rax,(%rsp)\n\t"
2619 "jnl .Lamd64_lt_fallthru\n\t"
2620 "lea 0x8(%rsp),%rsp\n\t"
2622 /* jmp, but don't trust the assembler to choose the right jump */
2623 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2624 ".Lamd64_lt_fallthru:\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2635 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2638 "cmp %rax,(%rsp)\n\t"
2639 "jnle .Lamd64_le_fallthru\n\t"
2640 "lea 0x8(%rsp),%rsp\n\t"
2642 /* jmp, but don't trust the assembler to choose the right jump */
2643 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2644 ".Lamd64_le_fallthru:\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2655 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2658 "cmp %rax,(%rsp)\n\t"
2659 "jng .Lamd64_gt_fallthru\n\t"
2660 "lea 0x8(%rsp),%rsp\n\t"
2662 /* jmp, but don't trust the assembler to choose the right jump */
2663 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2664 ".Lamd64_gt_fallthru:\n\t"
2665 "lea 0x8(%rsp),%rsp\n\t"
2675 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2678 "cmp %rax,(%rsp)\n\t"
2679 "jnge .Lamd64_ge_fallthru\n\t"
2680 ".Lamd64_ge_jump:\n\t"
2681 "lea 0x8(%rsp),%rsp\n\t"
2683 /* jmp, but don't trust the assembler to choose the right jump */
2684 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2685 ".Lamd64_ge_fallthru:\n\t"
2686 "lea 0x8(%rsp),%rsp\n\t"
2695 struct emit_ops amd64_emit_ops
=
2697 amd64_emit_prologue
,
2698 amd64_emit_epilogue
,
2703 amd64_emit_rsh_signed
,
2704 amd64_emit_rsh_unsigned
,
2712 amd64_emit_less_signed
,
2713 amd64_emit_less_unsigned
,
2717 amd64_write_goto_address
,
2722 amd64_emit_stack_flush
,
2723 amd64_emit_zero_ext
,
2725 amd64_emit_stack_adjust
,
2726 amd64_emit_int_call_1
,
2727 amd64_emit_void_call_2
,
2736 #endif /* __x86_64__ */
2739 i386_emit_prologue (void)
2741 EMIT_ASM32 (i386_prologue
,
2745 /* At this point, the raw regs base address is at 8(%ebp), and the
2746 value pointer is at 12(%ebp). */
2750 i386_emit_epilogue (void)
2752 EMIT_ASM32 (i386_epilogue
,
2753 "mov 12(%ebp),%ecx\n\t"
2754 "mov %eax,(%ecx)\n\t"
2755 "mov %ebx,0x4(%ecx)\n\t"
2763 i386_emit_add (void)
2765 EMIT_ASM32 (i386_add
,
2766 "add (%esp),%eax\n\t"
2767 "adc 0x4(%esp),%ebx\n\t"
2768 "lea 0x8(%esp),%esp");
2772 i386_emit_sub (void)
2774 EMIT_ASM32 (i386_sub
,
2775 "subl %eax,(%esp)\n\t"
2776 "sbbl %ebx,4(%esp)\n\t"
2782 i386_emit_mul (void)
2788 i386_emit_lsh (void)
2794 i386_emit_rsh_signed (void)
2800 i386_emit_rsh_unsigned (void)
2806 i386_emit_ext (int arg
)
2811 EMIT_ASM32 (i386_ext_8
,
2814 "movl %eax,%ebx\n\t"
2818 EMIT_ASM32 (i386_ext_16
,
2820 "movl %eax,%ebx\n\t"
2824 EMIT_ASM32 (i386_ext_32
,
2825 "movl %eax,%ebx\n\t"
2834 i386_emit_log_not (void)
2836 EMIT_ASM32 (i386_log_not
,
2838 "test %eax,%eax\n\t"
2845 i386_emit_bit_and (void)
2847 EMIT_ASM32 (i386_and
,
2848 "and (%esp),%eax\n\t"
2849 "and 0x4(%esp),%ebx\n\t"
2850 "lea 0x8(%esp),%esp");
2854 i386_emit_bit_or (void)
2856 EMIT_ASM32 (i386_or
,
2857 "or (%esp),%eax\n\t"
2858 "or 0x4(%esp),%ebx\n\t"
2859 "lea 0x8(%esp),%esp");
2863 i386_emit_bit_xor (void)
2865 EMIT_ASM32 (i386_xor
,
2866 "xor (%esp),%eax\n\t"
2867 "xor 0x4(%esp),%ebx\n\t"
2868 "lea 0x8(%esp),%esp");
2872 i386_emit_bit_not (void)
2874 EMIT_ASM32 (i386_bit_not
,
2875 "xor $0xffffffff,%eax\n\t"
2876 "xor $0xffffffff,%ebx\n\t");
2880 i386_emit_equal (void)
2882 EMIT_ASM32 (i386_equal
,
2883 "cmpl %ebx,4(%esp)\n\t"
2884 "jne .Li386_equal_false\n\t"
2885 "cmpl %eax,(%esp)\n\t"
2886 "je .Li386_equal_true\n\t"
2887 ".Li386_equal_false:\n\t"
2889 "jmp .Li386_equal_end\n\t"
2890 ".Li386_equal_true:\n\t"
2892 ".Li386_equal_end:\n\t"
2894 "lea 0x8(%esp),%esp");
2898 i386_emit_less_signed (void)
2900 EMIT_ASM32 (i386_less_signed
,
2901 "cmpl %ebx,4(%esp)\n\t"
2902 "jl .Li386_less_signed_true\n\t"
2903 "jne .Li386_less_signed_false\n\t"
2904 "cmpl %eax,(%esp)\n\t"
2905 "jl .Li386_less_signed_true\n\t"
2906 ".Li386_less_signed_false:\n\t"
2908 "jmp .Li386_less_signed_end\n\t"
2909 ".Li386_less_signed_true:\n\t"
2911 ".Li386_less_signed_end:\n\t"
2913 "lea 0x8(%esp),%esp");
2917 i386_emit_less_unsigned (void)
2919 EMIT_ASM32 (i386_less_unsigned
,
2920 "cmpl %ebx,4(%esp)\n\t"
2921 "jb .Li386_less_unsigned_true\n\t"
2922 "jne .Li386_less_unsigned_false\n\t"
2923 "cmpl %eax,(%esp)\n\t"
2924 "jb .Li386_less_unsigned_true\n\t"
2925 ".Li386_less_unsigned_false:\n\t"
2927 "jmp .Li386_less_unsigned_end\n\t"
2928 ".Li386_less_unsigned_true:\n\t"
2930 ".Li386_less_unsigned_end:\n\t"
2932 "lea 0x8(%esp),%esp");
2936 i386_emit_ref (int size
)
2941 EMIT_ASM32 (i386_ref1
,
2945 EMIT_ASM32 (i386_ref2
,
2949 EMIT_ASM32 (i386_ref4
,
2950 "movl (%eax),%eax");
2953 EMIT_ASM32 (i386_ref8
,
2954 "movl 4(%eax),%ebx\n\t"
2955 "movl (%eax),%eax");
2961 i386_emit_if_goto (int *offset_p
, int *size_p
)
2963 EMIT_ASM32 (i386_if_goto
,
2969 /* Don't trust the assembler to choose the right jump */
2970 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2973 *offset_p
= 11; /* be sure that this matches the sequence above */
2979 i386_emit_goto (int *offset_p
, int *size_p
)
2981 EMIT_ASM32 (i386_goto
,
2982 /* Don't trust the assembler to choose the right jump */
2983 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2991 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2993 int diff
= (to
- (from
+ size
));
2994 unsigned char buf
[sizeof (int)];
2996 /* We're only doing 4-byte sizes at the moment. */
3003 memcpy (buf
, &diff
, sizeof (int));
3004 write_inferior_memory (from
, buf
, sizeof (int));
3008 i386_emit_const (LONGEST num
)
3010 unsigned char buf
[16];
3012 CORE_ADDR buildaddr
= current_insn_ptr
;
3015 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3016 lo
= num
& 0xffffffff;
3017 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3019 hi
= ((num
>> 32) & 0xffffffff);
3022 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3023 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3028 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3030 append_insns (&buildaddr
, i
, buf
);
3031 current_insn_ptr
= buildaddr
;
3035 i386_emit_call (CORE_ADDR fn
)
3037 unsigned char buf
[16];
3039 CORE_ADDR buildaddr
;
3041 buildaddr
= current_insn_ptr
;
3043 buf
[i
++] = 0xe8; /* call <reladdr> */
3044 offset
= ((int) fn
) - (buildaddr
+ 5);
3045 memcpy (buf
+ 1, &offset
, 4);
3046 append_insns (&buildaddr
, 5, buf
);
3047 current_insn_ptr
= buildaddr
;
3051 i386_emit_reg (int reg
)
3053 unsigned char buf
[16];
3055 CORE_ADDR buildaddr
;
3057 EMIT_ASM32 (i386_reg_a
,
3059 buildaddr
= current_insn_ptr
;
3061 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3062 memcpy (&buf
[i
], ®
, sizeof (reg
));
3064 append_insns (&buildaddr
, i
, buf
);
3065 current_insn_ptr
= buildaddr
;
3066 EMIT_ASM32 (i386_reg_b
,
3067 "mov %eax,4(%esp)\n\t"
3068 "mov 8(%ebp),%eax\n\t"
3070 i386_emit_call (get_raw_reg_func_addr ());
3071 EMIT_ASM32 (i386_reg_c
,
3073 "lea 0x8(%esp),%esp");
3077 i386_emit_pop (void)
3079 EMIT_ASM32 (i386_pop
,
3085 i386_emit_stack_flush (void)
3087 EMIT_ASM32 (i386_stack_flush
,
3093 i386_emit_zero_ext (int arg
)
3098 EMIT_ASM32 (i386_zero_ext_8
,
3099 "and $0xff,%eax\n\t"
3103 EMIT_ASM32 (i386_zero_ext_16
,
3104 "and $0xffff,%eax\n\t"
3108 EMIT_ASM32 (i386_zero_ext_32
,
3117 i386_emit_swap (void)
3119 EMIT_ASM32 (i386_swap
,
3129 i386_emit_stack_adjust (int n
)
3131 unsigned char buf
[16];
3133 CORE_ADDR buildaddr
= current_insn_ptr
;
3136 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3140 append_insns (&buildaddr
, i
, buf
);
3141 current_insn_ptr
= buildaddr
;
3144 /* FN's prototype is `LONGEST(*fn)(int)'. */
3147 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3149 unsigned char buf
[16];
3151 CORE_ADDR buildaddr
;
3153 EMIT_ASM32 (i386_int_call_1_a
,
3154 /* Reserve a bit of stack space. */
3156 /* Put the one argument on the stack. */
3157 buildaddr
= current_insn_ptr
;
3159 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3162 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3164 append_insns (&buildaddr
, i
, buf
);
3165 current_insn_ptr
= buildaddr
;
3166 i386_emit_call (fn
);
3167 EMIT_ASM32 (i386_int_call_1_c
,
3169 "lea 0x8(%esp),%esp");
3172 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3175 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3177 unsigned char buf
[16];
3179 CORE_ADDR buildaddr
;
3181 EMIT_ASM32 (i386_void_call_2_a
,
3182 /* Preserve %eax only; we don't have to worry about %ebx. */
3184 /* Reserve a bit of stack space for arguments. */
3185 "sub $0x10,%esp\n\t"
3186 /* Copy "top" to the second argument position. (Note that
3187 we can't assume function won't scribble on its
3188 arguments, so don't try to restore from this.) */
3189 "mov %eax,4(%esp)\n\t"
3190 "mov %ebx,8(%esp)");
3191 /* Put the first argument on the stack. */
3192 buildaddr
= current_insn_ptr
;
3194 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3197 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3199 append_insns (&buildaddr
, i
, buf
);
3200 current_insn_ptr
= buildaddr
;
3201 i386_emit_call (fn
);
3202 EMIT_ASM32 (i386_void_call_2_b
,
3203 "lea 0x10(%esp),%esp\n\t"
3204 /* Restore original stack top. */
3210 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3213 /* Check low half first, more likely to be decider */
3214 "cmpl %eax,(%esp)\n\t"
3215 "jne .Leq_fallthru\n\t"
3216 "cmpl %ebx,4(%esp)\n\t"
3217 "jne .Leq_fallthru\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
3221 /* jmp, but don't trust the assembler to choose the right jump */
3222 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3223 ".Leq_fallthru:\n\t"
3224 "lea 0x8(%esp),%esp\n\t"
3235 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3238 /* Check low half first, more likely to be decider */
3239 "cmpl %eax,(%esp)\n\t"
3241 "cmpl %ebx,4(%esp)\n\t"
3242 "je .Lne_fallthru\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
3247 /* jmp, but don't trust the assembler to choose the right jump */
3248 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3249 ".Lne_fallthru:\n\t"
3250 "lea 0x8(%esp),%esp\n\t"
3261 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3264 "cmpl %ebx,4(%esp)\n\t"
3266 "jne .Llt_fallthru\n\t"
3267 "cmpl %eax,(%esp)\n\t"
3268 "jnl .Llt_fallthru\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
3273 /* jmp, but don't trust the assembler to choose the right jump */
3274 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3275 ".Llt_fallthru:\n\t"
3276 "lea 0x8(%esp),%esp\n\t"
3287 i386_emit_le_goto (int *offset_p
, int *size_p
)
3290 "cmpl %ebx,4(%esp)\n\t"
3292 "jne .Lle_fallthru\n\t"
3293 "cmpl %eax,(%esp)\n\t"
3294 "jnle .Lle_fallthru\n\t"
3296 "lea 0x8(%esp),%esp\n\t"
3299 /* jmp, but don't trust the assembler to choose the right jump */
3300 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3301 ".Lle_fallthru:\n\t"
3302 "lea 0x8(%esp),%esp\n\t"
3313 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3316 "cmpl %ebx,4(%esp)\n\t"
3318 "jne .Lgt_fallthru\n\t"
3319 "cmpl %eax,(%esp)\n\t"
3320 "jng .Lgt_fallthru\n\t"
3322 "lea 0x8(%esp),%esp\n\t"
3325 /* jmp, but don't trust the assembler to choose the right jump */
3326 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3327 ".Lgt_fallthru:\n\t"
3328 "lea 0x8(%esp),%esp\n\t"
3339 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3342 "cmpl %ebx,4(%esp)\n\t"
3344 "jne .Lge_fallthru\n\t"
3345 "cmpl %eax,(%esp)\n\t"
3346 "jnge .Lge_fallthru\n\t"
3348 "lea 0x8(%esp),%esp\n\t"
3351 /* jmp, but don't trust the assembler to choose the right jump */
3352 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3353 ".Lge_fallthru:\n\t"
3354 "lea 0x8(%esp),%esp\n\t"
3364 struct emit_ops i386_emit_ops
=
3372 i386_emit_rsh_signed
,
3373 i386_emit_rsh_unsigned
,
3381 i386_emit_less_signed
,
3382 i386_emit_less_unsigned
,
3386 i386_write_goto_address
,
3391 i386_emit_stack_flush
,
3394 i386_emit_stack_adjust
,
3395 i386_emit_int_call_1
,
3396 i386_emit_void_call_2
,
3406 static struct emit_ops
*
3410 if (is_64bit_tdesc ())
3411 return &amd64_emit_ops
;
3414 return &i386_emit_ops
;
3418 x86_supports_range_stepping (void)
3423 /* This is initialized assuming an amd64 target.
3424 x86_arch_setup will correct it for i386 or amd64 targets. */
3426 struct linux_target_ops the_low_target
=
3429 x86_linux_regs_info
,
3430 x86_cannot_fetch_register
,
3431 x86_cannot_store_register
,
3432 NULL
, /* fetch_register */
3440 x86_supports_z_point_type
,
3443 x86_stopped_by_watchpoint
,
3444 x86_stopped_data_address
,
3445 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3446 native i386 case (no registers smaller than an xfer unit), and are not
3447 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3450 /* need to fix up i386 siginfo if host is amd64 */
3452 x86_linux_new_process
,
3453 x86_linux_new_thread
,
3454 x86_linux_prepare_to_resume
,
3455 x86_linux_process_qsupported
,
3456 x86_supports_tracepoints
,
3457 x86_get_thread_area
,
3458 x86_install_fast_tracepoint_jump_pad
,
3460 x86_get_min_fast_tracepoint_insn_len
,
3461 x86_supports_range_stepping
,
3465 initialize_low_arch (void)
3467 /* Initialize the Linux target descriptions. */
3469 init_registers_amd64_linux ();
3470 init_registers_amd64_avx_linux ();
3471 init_registers_amd64_avx512_linux ();
3472 init_registers_amd64_mpx_linux ();
3474 init_registers_x32_linux ();
3475 init_registers_x32_avx_linux ();
3476 init_registers_x32_avx512_linux ();
3478 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3479 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3480 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3482 init_registers_i386_linux ();
3483 init_registers_i386_mmx_linux ();
3484 init_registers_i386_avx_linux ();
3485 init_registers_i386_avx512_linux ();
3486 init_registers_i386_mpx_linux ();
3488 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3489 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3490 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3492 initialize_regsets_info (&x86_regsets_info
);