1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
/* Target descriptions and their one-time initializers.  Each pair is
   emitted into an auto-generated file by the regformats machinery.  */

/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;
/* Target descriptions handed to a GDB that lacks XML support; built
   lazily the first time they are needed.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

/* Byte templates for the jump instructions patched in by the fast
   tracepoint code: a 5-byte rel32 jump (opcode 0xE9) and a 4-byte
   operand-size-prefixed rel16 jump (0x66 0xE9).  The displacement
   bytes are filled in at install time.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  Minimal target
   descriptions, sent verbatim when the connected GDB did not announce
   xmlRegisters support.  Fix: both string literals were left
   unterminated (the closing "</target>" and quote were missing).  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests that older <sys/ptrace.h>
   headers may not provide; the kernel request numbers are stable ABI.
   Fix: every #ifndef below was missing its matching #endif.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct i386_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
181 static const int x86_64_regmap
[] =
183 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
184 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
185 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
186 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
187 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
188 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
208 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
209 #define X86_64_USER_REGS (GS + 1)
211 #else /* ! __x86_64__ */
213 /* Mapping between the general-purpose registers in `struct user'
214 format and GDB's register array layout. */
215 static /*const*/ int i386_regmap
[] =
217 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
218 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
219 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
220 DS
* 4, ES
* 4, FS
* 4, GS
* 4
223 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
229 /* Returns true if the current inferior belongs to a x86-64 process,
233 is_64bit_tdesc (void)
235 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
237 return register_size (regcache
->tdesc
, 0) == 8;
243 /* Called by libthread_db. */
246 ps_get_thread_area (const struct ps_prochandle
*ph
,
247 lwpid_t lwpid
, int idx
, void **base
)
250 int use_64bit
= is_64bit_tdesc ();
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
261 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
272 unsigned int desc
[4];
274 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
275 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
278 /* Ensure we properly extend the value to 64-bits for x86_64. */
279 *base
= (void *) (uintptr_t) desc
[1];
284 /* Get the thread area address. This is used to recognize which
285 thread is which when tracing with the in-process agent library. We
286 don't read anything from the address, and treat it as opaque; it's
287 the address itself that we assume is unique per-thread. */
290 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
293 int use_64bit
= is_64bit_tdesc ();
298 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
300 *addr
= (CORE_ADDR
) (uintptr_t) base
;
309 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
310 struct thread_info
*thr
= get_lwp_thread (lwp
);
311 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
312 unsigned int desc
[4];
314 const int reg_thread_area
= 3; /* bits to scale down register value. */
317 collect_register_by_name (regcache
, "gs", &gs
);
319 idx
= gs
>> reg_thread_area
;
321 if (ptrace (PTRACE_GET_THREAD_AREA
,
323 (void *) (long) idx
, (unsigned long) &desc
) < 0)
334 x86_cannot_store_register (int regno
)
337 if (is_64bit_tdesc ())
341 return regno
>= I386_NUM_REGS
;
345 x86_cannot_fetch_register (int regno
)
348 if (is_64bit_tdesc ())
352 return regno
>= I386_NUM_REGS
;
356 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
361 if (register_size (regcache
->tdesc
, 0) == 8)
363 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
364 if (x86_64_regmap
[i
] != -1)
365 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
369 /* 32-bit inferior registers need to be zero-extended.
370 Callers would read uninitialized memory otherwise. */
371 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
374 for (i
= 0; i
< I386_NUM_REGS
; i
++)
375 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
377 collect_register_by_name (regcache
, "orig_eax",
378 ((char *) buf
) + ORIG_EAX
* 4);
382 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
387 if (register_size (regcache
->tdesc
, 0) == 8)
389 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
390 if (x86_64_regmap
[i
] != -1)
391 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
396 for (i
= 0; i
< I386_NUM_REGS
; i
++)
397 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
399 supply_register_by_name (regcache
, "orig_eax",
400 ((char *) buf
) + ORIG_EAX
* 4);
/* Fill the ptrace FP register buffer BUF from REGCACHE: FXSAVE layout
   on 64-bit builds, classic FSAVE layout otherwise.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Store the ptrace FP register buffer BUF into REGCACHE: FXSAVE layout
   on 64-bit builds, classic FSAVE layout otherwise.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
/* Fill the FPX (FXSAVE-format) register buffer BUF from REGCACHE.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
/* Store the FPX (FXSAVE-format) register buffer BUF into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
440 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
442 i387_cache_to_xsave (regcache
, buf
);
/* Store the XSAVE extended-state buffer BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
451 /* ??? The non-biarch i386 case stores all the i387 regs twice.
452 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
453 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
454 doesn't work. IWBN to avoid the duplication in the case where it
455 does work. Maybe the arch_setup routine could check whether it works
456 and update the supported regsets accordingly. */
458 static struct regset_info x86_regsets
[] =
460 #ifdef HAVE_PTRACE_GETREGS
461 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
463 x86_fill_gregset
, x86_store_gregset
},
464 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
465 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
467 # ifdef HAVE_PTRACE_GETFPXREGS
468 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
470 x86_fill_fpxregset
, x86_store_fpxregset
},
473 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
475 x86_fill_fpregset
, x86_store_fpregset
},
476 #endif /* HAVE_PTRACE_GETREGS */
477 { 0, 0, 0, -1, -1, NULL
, NULL
}
481 x86_get_pc (struct regcache
*regcache
)
483 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
488 collect_register_by_name (regcache
, "rip", &pc
);
489 return (CORE_ADDR
) pc
;
494 collect_register_by_name (regcache
, "eip", &pc
);
495 return (CORE_ADDR
) pc
;
500 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
502 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
506 unsigned long newpc
= pc
;
507 supply_register_by_name (regcache
, "rip", &newpc
);
511 unsigned int newpc
= pc
;
512 supply_register_by_name (regcache
, "eip", &newpc
);
/* The x86 software breakpoint: a single `int3' (0xCC) byte.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
520 x86_breakpoint_at (CORE_ADDR pc
)
524 (*the_target
->read_memory
) (pc
, &c
, 1);
531 /* Support for debug registers. */
534 x86_linux_dr_get (ptid_t ptid
, int regnum
)
539 tid
= ptid_get_lwp (ptid
);
542 value
= ptrace (PTRACE_PEEKUSER
, tid
,
543 offsetof (struct user
, u_debugreg
[regnum
]), 0);
545 error ("Couldn't read debug register");
551 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
555 tid
= ptid_get_lwp (ptid
);
558 ptrace (PTRACE_POKEUSER
, tid
,
559 offsetof (struct user
, u_debugreg
[regnum
]), value
);
561 error ("Couldn't write debug register");
565 update_debug_registers_callback (struct inferior_list_entry
*entry
,
568 struct thread_info
*thr
= (struct thread_info
*) entry
;
569 struct lwp_info
*lwp
= get_thread_lwp (thr
);
570 int pid
= *(int *) pid_p
;
572 /* Only update the threads of this process. */
573 if (pid_of (thr
) == pid
)
575 /* The actual update is done later just before resuming the lwp,
576 we just mark that the registers need updating. */
577 lwp
->arch_private
->debug_registers_changed
= 1;
579 /* If the lwp isn't stopped, force it to momentarily pause, so
580 we can update its debug registers. */
582 linux_stop_lwp (lwp
);
588 /* Update the inferior's debug register REGNUM from STATE. */
591 i386_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
593 /* Only update the threads of this process. */
594 int pid
= pid_of (current_inferior
);
596 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
597 fatal ("Invalid debug register %d", regnum
);
599 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
602 /* Return the inferior's debug register REGNUM. */
605 i386_dr_low_get_addr (int regnum
)
607 ptid_t ptid
= ptid_of (current_inferior
);
609 /* DR6 and DR7 are retrieved with some other way. */
610 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
612 return x86_linux_dr_get (ptid
, regnum
);
615 /* Update the inferior's DR7 debug control register from STATE. */
618 i386_dr_low_set_control (unsigned long control
)
620 /* Only update the threads of this process. */
621 int pid
= pid_of (current_inferior
);
623 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
626 /* Return the inferior's DR7 debug control register. */
629 i386_dr_low_get_control (void)
631 ptid_t ptid
= ptid_of (current_inferior
);
633 return x86_linux_dr_get (ptid
, DR_CONTROL
);
636 /* Get the value of the DR6 debug status register from the inferior
637 and record it in STATE. */
640 i386_dr_low_get_status (void)
642 ptid_t ptid
= ptid_of (current_inferior
);
644 return x86_linux_dr_get (ptid
, DR_STATUS
);
647 /* Low-level function vector. */
648 struct i386_dr_low_type i386_dr_low
=
650 i386_dr_low_set_control
,
651 i386_dr_low_set_addr
,
652 i386_dr_low_get_addr
,
653 i386_dr_low_get_status
,
654 i386_dr_low_get_control
,
658 /* Breakpoint/Watchpoint support. */
661 x86_supports_z_point_type (char z_type
)
667 case Z_PACKET_WRITE_WP
:
668 case Z_PACKET_ACCESS_WP
:
676 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
677 int size
, struct raw_breakpoint
*bp
)
679 struct process_info
*proc
= current_process ();
683 case raw_bkpt_type_sw
:
684 return insert_memory_breakpoint (bp
);
686 case raw_bkpt_type_hw
:
687 case raw_bkpt_type_write_wp
:
688 case raw_bkpt_type_access_wp
:
690 enum target_hw_bp_type hw_type
691 = raw_bkpt_type_to_target_hw_bp_type (type
);
692 struct i386_debug_reg_state
*state
693 = &proc
->private->arch_private
->debug_reg_state
;
695 return i386_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
705 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
706 int size
, struct raw_breakpoint
*bp
)
708 struct process_info
*proc
= current_process ();
712 case raw_bkpt_type_sw
:
713 return remove_memory_breakpoint (bp
);
715 case raw_bkpt_type_hw
:
716 case raw_bkpt_type_write_wp
:
717 case raw_bkpt_type_access_wp
:
719 enum target_hw_bp_type hw_type
720 = raw_bkpt_type_to_target_hw_bp_type (type
);
721 struct i386_debug_reg_state
*state
722 = &proc
->private->arch_private
->debug_reg_state
;
724 return i386_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
733 x86_stopped_by_watchpoint (void)
735 struct process_info
*proc
= current_process ();
736 return i386_dr_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
740 x86_stopped_data_address (void)
742 struct process_info
*proc
= current_process ();
744 if (i386_dr_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
750 /* Called when a new process is created. */
752 static struct arch_process_info
*
753 x86_linux_new_process (void)
755 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
757 i386_low_init_dregs (&info
->debug_reg_state
);
762 /* Called when a new thread is detected. */
764 static struct arch_lwp_info
*
765 x86_linux_new_thread (void)
767 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
769 info
->debug_registers_changed
= 1;
774 /* Called when resuming a thread.
775 If the debug regs have changed, update the thread's copies. */
778 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
780 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
781 int clear_status
= 0;
783 if (lwp
->arch_private
->debug_registers_changed
)
786 int pid
= ptid_get_pid (ptid
);
787 struct process_info
*proc
= find_process_pid (pid
);
788 struct i386_debug_reg_state
*state
789 = &proc
->private->arch_private
->debug_reg_state
;
791 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
793 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
794 if (state
->dr_ref_count
[i
] > 0)
796 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
798 /* If we're setting a watchpoint, any change the inferior
799 had done itself to the debug registers needs to be
800 discarded, otherwise, i386_dr_stopped_data_address can
805 if (state
->dr_control_mirror
!= 0)
806 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
808 lwp
->arch_private
->debug_registers_changed
= 0;
811 if (clear_status
|| lwp
->stopped_by_watchpoint
)
812 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
815 /* When GDBSERVER is built as a 64-bit application on linux, the
816 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
817 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
818 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
819 conversion in-place ourselves. */
821 /* These types below (compat_*) define a siginfo type that is layout
822 compatible with the siginfo type exported by the 32-bit userspace
827 typedef int compat_int_t
;
828 typedef unsigned int compat_uptr_t
;
830 typedef int compat_time_t
;
831 typedef int compat_timer_t
;
832 typedef int compat_clock_t
;
834 struct compat_timeval
836 compat_time_t tv_sec
;
840 typedef union compat_sigval
842 compat_int_t sival_int
;
843 compat_uptr_t sival_ptr
;
846 typedef struct compat_siginfo
854 int _pad
[((128 / sizeof (int)) - 3)];
863 /* POSIX.1b timers */
868 compat_sigval_t _sigval
;
871 /* POSIX.1b signals */
876 compat_sigval_t _sigval
;
885 compat_clock_t _utime
;
886 compat_clock_t _stime
;
889 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
904 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
905 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
907 typedef struct compat_x32_siginfo
915 int _pad
[((128 / sizeof (int)) - 3)];
924 /* POSIX.1b timers */
929 compat_sigval_t _sigval
;
932 /* POSIX.1b signals */
937 compat_sigval_t _sigval
;
946 compat_x32_clock_t _utime
;
947 compat_x32_clock_t _stime
;
950 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
963 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
965 #define cpt_si_pid _sifields._kill._pid
966 #define cpt_si_uid _sifields._kill._uid
967 #define cpt_si_timerid _sifields._timer._tid
968 #define cpt_si_overrun _sifields._timer._overrun
969 #define cpt_si_status _sifields._sigchld._status
970 #define cpt_si_utime _sifields._sigchld._utime
971 #define cpt_si_stime _sifields._sigchld._stime
972 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
973 #define cpt_si_addr _sifields._sigfault._addr
974 #define cpt_si_band _sifields._sigpoll._band
975 #define cpt_si_fd _sifields._sigpoll._fd
977 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
978 In their place is si_timer1,si_timer2. */
980 #define si_timerid si_timer1
983 #define si_overrun si_timer2
987 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
989 memset (to
, 0, sizeof (*to
));
991 to
->si_signo
= from
->si_signo
;
992 to
->si_errno
= from
->si_errno
;
993 to
->si_code
= from
->si_code
;
995 if (to
->si_code
== SI_TIMER
)
997 to
->cpt_si_timerid
= from
->si_timerid
;
998 to
->cpt_si_overrun
= from
->si_overrun
;
999 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1001 else if (to
->si_code
== SI_USER
)
1003 to
->cpt_si_pid
= from
->si_pid
;
1004 to
->cpt_si_uid
= from
->si_uid
;
1006 else if (to
->si_code
< 0)
1008 to
->cpt_si_pid
= from
->si_pid
;
1009 to
->cpt_si_uid
= from
->si_uid
;
1010 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1014 switch (to
->si_signo
)
1017 to
->cpt_si_pid
= from
->si_pid
;
1018 to
->cpt_si_uid
= from
->si_uid
;
1019 to
->cpt_si_status
= from
->si_status
;
1020 to
->cpt_si_utime
= from
->si_utime
;
1021 to
->cpt_si_stime
= from
->si_stime
;
1027 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1030 to
->cpt_si_band
= from
->si_band
;
1031 to
->cpt_si_fd
= from
->si_fd
;
1034 to
->cpt_si_pid
= from
->si_pid
;
1035 to
->cpt_si_uid
= from
->si_uid
;
1036 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1043 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1045 memset (to
, 0, sizeof (*to
));
1047 to
->si_signo
= from
->si_signo
;
1048 to
->si_errno
= from
->si_errno
;
1049 to
->si_code
= from
->si_code
;
1051 if (to
->si_code
== SI_TIMER
)
1053 to
->si_timerid
= from
->cpt_si_timerid
;
1054 to
->si_overrun
= from
->cpt_si_overrun
;
1055 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1057 else if (to
->si_code
== SI_USER
)
1059 to
->si_pid
= from
->cpt_si_pid
;
1060 to
->si_uid
= from
->cpt_si_uid
;
1062 else if (to
->si_code
< 0)
1064 to
->si_pid
= from
->cpt_si_pid
;
1065 to
->si_uid
= from
->cpt_si_uid
;
1066 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1070 switch (to
->si_signo
)
1073 to
->si_pid
= from
->cpt_si_pid
;
1074 to
->si_uid
= from
->cpt_si_uid
;
1075 to
->si_status
= from
->cpt_si_status
;
1076 to
->si_utime
= from
->cpt_si_utime
;
1077 to
->si_stime
= from
->cpt_si_stime
;
1083 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1086 to
->si_band
= from
->cpt_si_band
;
1087 to
->si_fd
= from
->cpt_si_fd
;
1090 to
->si_pid
= from
->cpt_si_pid
;
1091 to
->si_uid
= from
->cpt_si_uid
;
1092 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1099 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1102 memset (to
, 0, sizeof (*to
));
1104 to
->si_signo
= from
->si_signo
;
1105 to
->si_errno
= from
->si_errno
;
1106 to
->si_code
= from
->si_code
;
1108 if (to
->si_code
== SI_TIMER
)
1110 to
->cpt_si_timerid
= from
->si_timerid
;
1111 to
->cpt_si_overrun
= from
->si_overrun
;
1112 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1114 else if (to
->si_code
== SI_USER
)
1116 to
->cpt_si_pid
= from
->si_pid
;
1117 to
->cpt_si_uid
= from
->si_uid
;
1119 else if (to
->si_code
< 0)
1121 to
->cpt_si_pid
= from
->si_pid
;
1122 to
->cpt_si_uid
= from
->si_uid
;
1123 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1127 switch (to
->si_signo
)
1130 to
->cpt_si_pid
= from
->si_pid
;
1131 to
->cpt_si_uid
= from
->si_uid
;
1132 to
->cpt_si_status
= from
->si_status
;
1133 to
->cpt_si_utime
= from
->si_utime
;
1134 to
->cpt_si_stime
= from
->si_stime
;
1140 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1143 to
->cpt_si_band
= from
->si_band
;
1144 to
->cpt_si_fd
= from
->si_fd
;
1147 to
->cpt_si_pid
= from
->si_pid
;
1148 to
->cpt_si_uid
= from
->si_uid
;
1149 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1156 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1157 compat_x32_siginfo_t
*from
)
1159 memset (to
, 0, sizeof (*to
));
1161 to
->si_signo
= from
->si_signo
;
1162 to
->si_errno
= from
->si_errno
;
1163 to
->si_code
= from
->si_code
;
1165 if (to
->si_code
== SI_TIMER
)
1167 to
->si_timerid
= from
->cpt_si_timerid
;
1168 to
->si_overrun
= from
->cpt_si_overrun
;
1169 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1171 else if (to
->si_code
== SI_USER
)
1173 to
->si_pid
= from
->cpt_si_pid
;
1174 to
->si_uid
= from
->cpt_si_uid
;
1176 else if (to
->si_code
< 0)
1178 to
->si_pid
= from
->cpt_si_pid
;
1179 to
->si_uid
= from
->cpt_si_uid
;
1180 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1184 switch (to
->si_signo
)
1187 to
->si_pid
= from
->cpt_si_pid
;
1188 to
->si_uid
= from
->cpt_si_uid
;
1189 to
->si_status
= from
->cpt_si_status
;
1190 to
->si_utime
= from
->cpt_si_utime
;
1191 to
->si_stime
= from
->cpt_si_stime
;
1197 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1200 to
->si_band
= from
->cpt_si_band
;
1201 to
->si_fd
= from
->cpt_si_fd
;
1204 to
->si_pid
= from
->cpt_si_pid
;
1205 to
->si_uid
= from
->cpt_si_uid
;
1206 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1212 #endif /* __x86_64__ */
1214 /* Convert a native/host siginfo object, into/from the siginfo in the
1215 layout of the inferiors' architecture. Returns true if any
1216 conversion was done; false otherwise. If DIRECTION is 1, then copy
1217 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1221 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1224 unsigned int machine
;
1225 int tid
= lwpid_of (current_inferior
);
1226 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1228 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1229 if (!is_64bit_tdesc ())
1231 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1232 fatal ("unexpected difference in siginfo");
1235 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1237 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1241 /* No fixup for native x32 GDB. */
1242 else if (!is_elf64
&& sizeof (void *) == 8)
1244 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1245 fatal ("unexpected difference in siginfo");
1248 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1251 siginfo_from_compat_x32_siginfo (native
,
1252 (struct compat_x32_siginfo
*) inf
);
1263 /* Format of XSAVE extended state is:
1266 fxsave_bytes[0..463]
1267 sw_usable_bytes[464..511]
1268 xstate_hdr_bytes[512..575]
1273 Same memory layout will be used for the coredump NT_X86_XSTATE
1274 representing the XSAVE extended state registers.
1276 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1277 extended state mask, which is the same as the extended control register
1278 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1279 together with the mask saved in the xstate_hdr_bytes to determine what
1280 states the processor/OS supports and what state, used or initialized,
1281 the process/thread is in. */
1282 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1284 /* Does the current host support the GETFPXREGS request? The header
1285 file may or may not define it, and even if it is defined, the
1286 kernel will return EIO if it's running on a pre-SSE processor. */
1287 int have_ptrace_getfpxregs
=
1288 #ifdef HAVE_PTRACE_GETFPXREGS
1295 /* Does the current host support PTRACE_GETREGSET? */
1296 static int have_ptrace_getregset
= -1;
1298 /* Get Linux/x86 target description from running target. */
1300 static const struct target_desc
*
1301 x86_linux_read_description (void)
1303 unsigned int machine
;
1307 static uint64_t xcr0
;
1308 struct regset_info
*regset
;
1310 tid
= lwpid_of (current_inferior
);
1312 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1314 if (sizeof (void *) == 4)
1317 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1319 else if (machine
== EM_X86_64
)
1320 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1324 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1325 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1327 elf_fpxregset_t fpxregs
;
1329 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1331 have_ptrace_getfpxregs
= 0;
1332 have_ptrace_getregset
= 0;
1333 return tdesc_i386_mmx_linux
;
1336 have_ptrace_getfpxregs
= 1;
1342 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1344 /* Don't use XML. */
1346 if (machine
== EM_X86_64
)
1347 return tdesc_amd64_linux_no_xml
;
1350 return tdesc_i386_linux_no_xml
;
1353 if (have_ptrace_getregset
== -1)
1355 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1358 iov
.iov_base
= xstateregs
;
1359 iov
.iov_len
= sizeof (xstateregs
);
1361 /* Check if PTRACE_GETREGSET works. */
1362 if (ptrace (PTRACE_GETREGSET
, tid
,
1363 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1364 have_ptrace_getregset
= 0;
1367 have_ptrace_getregset
= 1;
1369 /* Get XCR0 from XSAVE extended state. */
1370 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1371 / sizeof (uint64_t))];
1373 /* Use PTRACE_GETREGSET if it is available. */
1374 for (regset
= x86_regsets
;
1375 regset
->fill_function
!= NULL
; regset
++)
1376 if (regset
->get_request
== PTRACE_GETREGSET
)
1377 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1378 else if (regset
->type
!= GENERAL_REGS
)
1383 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1384 xcr0_features
= (have_ptrace_getregset
1385 && (xcr0
& I386_XSTATE_ALL_MASK
));
1390 if (machine
== EM_X86_64
)
1397 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1399 case I386_XSTATE_AVX512_MASK
:
1400 return tdesc_amd64_avx512_linux
;
1402 case I386_XSTATE_MPX_MASK
:
1403 return tdesc_amd64_mpx_linux
;
1405 case I386_XSTATE_AVX_MASK
:
1406 return tdesc_amd64_avx_linux
;
1409 return tdesc_amd64_linux
;
1413 return tdesc_amd64_linux
;
1419 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1421 case I386_XSTATE_AVX512_MASK
:
1422 return tdesc_x32_avx512_linux
;
1424 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1425 case I386_XSTATE_AVX_MASK
:
1426 return tdesc_x32_avx_linux
;
1429 return tdesc_x32_linux
;
1433 return tdesc_x32_linux
;
1441 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1443 case (I386_XSTATE_AVX512_MASK
):
1444 return tdesc_i386_avx512_linux
;
1446 case (I386_XSTATE_MPX_MASK
):
1447 return tdesc_i386_mpx_linux
;
1449 case (I386_XSTATE_AVX_MASK
):
1450 return tdesc_i386_avx_linux
;
1453 return tdesc_i386_linux
;
1457 return tdesc_i386_linux
;
1460 gdb_assert_not_reached ("failed to return tdesc");
1463 /* Callback for find_inferior. Stops iteration when a thread with a
1464 given PID is found. */
1467 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1469 int pid
= *(int *) data
;
1471 return (ptid_get_pid (entry
->id
) == pid
);
1474 /* Callback for for_each_inferior. Calls the arch_setup routine for
1478 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1480 int pid
= ptid_get_pid (entry
->id
);
1482 /* Look up any thread of this processes. */
1484 = (struct thread_info
*) find_inferior (&all_threads
,
1485 same_process_callback
, &pid
);
1487 the_low_target
.arch_setup ();
1490 /* Update all the target description of all processes; a new GDB
1491 connected, and it may or not support xml target descriptions. */
1494 x86_linux_update_xmltarget (void)
1496 struct thread_info
*save_inferior
= current_inferior
;
1498 /* Before changing the register cache's internal layout, flush the
1499 contents of the current valid caches back to the threads, and
1500 release the current regcache objects. */
1501 regcache_release ();
1503 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1505 current_inferior
= save_inferior
;
1508 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1509 PTRACE_GETREGSET. */
1512 x86_linux_process_qsupported (const char *query
)
1514 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1515 with "i386" in qSupported query, it supports x86 XML target
1518 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1520 char *copy
= xstrdup (query
+ 13);
1523 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1525 if (strcmp (p
, "i386") == 0)
1535 x86_linux_update_xmltarget ();
1538 /* Common for x86/x86-64. */
1540 static struct regsets_info x86_regsets_info
=
1542 x86_regsets
, /* regsets */
1543 0, /* num_regsets */
1544 NULL
, /* disabled_regsets */
1548 static struct regs_info amd64_linux_regs_info
=
1550 NULL
, /* regset_bitmap */
1551 NULL
, /* usrregs_info */
1555 static struct usrregs_info i386_linux_usrregs_info
=
1561 static struct regs_info i386_linux_regs_info
=
1563 NULL
, /* regset_bitmap */
1564 &i386_linux_usrregs_info
,
1568 const struct regs_info
*
1569 x86_linux_regs_info (void)
1572 if (is_64bit_tdesc ())
1573 return &amd64_linux_regs_info
;
1576 return &i386_linux_regs_info
;
1579 /* Initialize the target description for the architecture of the
1583 x86_arch_setup (void)
1585 current_process ()->tdesc
= x86_linux_read_description ();
/* Tracepoints (including fast tracepoints) are supported on this
   target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1595 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1597 write_inferior_memory (*to
, buf
, len
);
/* Decode OP, a string of space-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), storing each byte into BUF in order.
   Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *endptr;
      unsigned long byte = strtoul (op, &endptr, 16);

      /* No digits consumed: end of the opcode string.  */
      if (endptr == op)
	break;

      *dst++ = byte;
      op = endptr;
    }

  return dst - buf;
}
1623 /* Build a jump pad that saves registers and calls a collection
1624 function. Writes a jump instruction to the jump pad to
1625 JJUMPAD_INSN. The caller is responsible to write it in at the
1626 tracepoint address. */
1629 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1630 CORE_ADDR collector
,
1633 CORE_ADDR
*jump_entry
,
1634 CORE_ADDR
*trampoline
,
1635 ULONGEST
*trampoline_size
,
1636 unsigned char *jjump_pad_insn
,
1637 ULONGEST
*jjump_pad_insn_size
,
1638 CORE_ADDR
*adjusted_insn_addr
,
1639 CORE_ADDR
*adjusted_insn_addr_end
,
1642 unsigned char buf
[40];
1646 CORE_ADDR buildaddr
= *jump_entry
;
1648 /* Build the jump pad. */
1650 /* First, do tracepoint data collection. Save registers. */
1652 /* Need to ensure stack pointer saved first. */
1653 buf
[i
++] = 0x54; /* push %rsp */
1654 buf
[i
++] = 0x55; /* push %rbp */
1655 buf
[i
++] = 0x57; /* push %rdi */
1656 buf
[i
++] = 0x56; /* push %rsi */
1657 buf
[i
++] = 0x52; /* push %rdx */
1658 buf
[i
++] = 0x51; /* push %rcx */
1659 buf
[i
++] = 0x53; /* push %rbx */
1660 buf
[i
++] = 0x50; /* push %rax */
1661 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1662 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1663 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1664 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1665 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1666 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1667 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1668 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1669 buf
[i
++] = 0x9c; /* pushfq */
1670 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1672 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1673 i
+= sizeof (unsigned long);
1674 buf
[i
++] = 0x57; /* push %rdi */
1675 append_insns (&buildaddr
, i
, buf
);
1677 /* Stack space for the collecting_t object. */
1679 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1680 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1681 memcpy (buf
+ i
, &tpoint
, 8);
1683 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1684 i
+= push_opcode (&buf
[i
],
1685 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1686 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1687 append_insns (&buildaddr
, i
, buf
);
1691 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1692 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1694 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1695 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1696 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1697 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1698 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1699 append_insns (&buildaddr
, i
, buf
);
1701 /* Set up the gdb_collect call. */
1702 /* At this point, (stack pointer + 0x18) is the base of our saved
1706 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1707 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1709 /* tpoint address may be 64-bit wide. */
1710 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1711 memcpy (buf
+ i
, &tpoint
, 8);
1713 append_insns (&buildaddr
, i
, buf
);
1715 /* The collector function being in the shared library, may be
1716 >31-bits away off the jump pad. */
1718 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1719 memcpy (buf
+ i
, &collector
, 8);
1721 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1722 append_insns (&buildaddr
, i
, buf
);
1724 /* Clear the spin-lock. */
1726 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1727 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1728 memcpy (buf
+ i
, &lockaddr
, 8);
1730 append_insns (&buildaddr
, i
, buf
);
1732 /* Remove stack that had been used for the collect_t object. */
1734 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1735 append_insns (&buildaddr
, i
, buf
);
1737 /* Restore register state. */
1739 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1743 buf
[i
++] = 0x9d; /* popfq */
1744 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1745 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1746 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1747 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1748 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1749 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1750 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1751 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1752 buf
[i
++] = 0x58; /* pop %rax */
1753 buf
[i
++] = 0x5b; /* pop %rbx */
1754 buf
[i
++] = 0x59; /* pop %rcx */
1755 buf
[i
++] = 0x5a; /* pop %rdx */
1756 buf
[i
++] = 0x5e; /* pop %rsi */
1757 buf
[i
++] = 0x5f; /* pop %rdi */
1758 buf
[i
++] = 0x5d; /* pop %rbp */
1759 buf
[i
++] = 0x5c; /* pop %rsp */
1760 append_insns (&buildaddr
, i
, buf
);
1762 /* Now, adjust the original instruction to execute in the jump
1764 *adjusted_insn_addr
= buildaddr
;
1765 relocate_instruction (&buildaddr
, tpaddr
);
1766 *adjusted_insn_addr_end
= buildaddr
;
1768 /* Finally, write a jump back to the program. */
1770 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1771 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1774 "E.Jump back from jump pad too far from tracepoint "
1775 "(offset 0x%" PRIx64
" > int32).", loffset
);
1779 offset
= (int) loffset
;
1780 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1781 memcpy (buf
+ 1, &offset
, 4);
1782 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1784 /* The jump pad is now built. Wire in a jump to our jump pad. This
1785 is always done last (by our caller actually), so that we can
1786 install fast tracepoints with threads running. This relies on
1787 the agent's atomic write support. */
1788 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1789 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1792 "E.Jump pad too far from tracepoint "
1793 "(offset 0x%" PRIx64
" > int32).", loffset
);
1797 offset
= (int) loffset
;
1799 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1800 memcpy (buf
+ 1, &offset
, 4);
1801 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1802 *jjump_pad_insn_size
= sizeof (jump_insn
);
1804 /* Return the end address of our pad. */
1805 *jump_entry
= buildaddr
;
1810 #endif /* __x86_64__ */
1812 /* Build a jump pad that saves registers and calls a collection
1813 function. Writes a jump instruction to the jump pad to
1814 JJUMPAD_INSN. The caller is responsible to write it in at the
1815 tracepoint address. */
1818 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1819 CORE_ADDR collector
,
1822 CORE_ADDR
*jump_entry
,
1823 CORE_ADDR
*trampoline
,
1824 ULONGEST
*trampoline_size
,
1825 unsigned char *jjump_pad_insn
,
1826 ULONGEST
*jjump_pad_insn_size
,
1827 CORE_ADDR
*adjusted_insn_addr
,
1828 CORE_ADDR
*adjusted_insn_addr_end
,
1831 unsigned char buf
[0x100];
1833 CORE_ADDR buildaddr
= *jump_entry
;
1835 /* Build the jump pad. */
1837 /* First, do tracepoint data collection. Save registers. */
1839 buf
[i
++] = 0x60; /* pushad */
1840 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1841 *((int *)(buf
+ i
)) = (int) tpaddr
;
1843 buf
[i
++] = 0x9c; /* pushf */
1844 buf
[i
++] = 0x1e; /* push %ds */
1845 buf
[i
++] = 0x06; /* push %es */
1846 buf
[i
++] = 0x0f; /* push %fs */
1848 buf
[i
++] = 0x0f; /* push %gs */
1850 buf
[i
++] = 0x16; /* push %ss */
1851 buf
[i
++] = 0x0e; /* push %cs */
1852 append_insns (&buildaddr
, i
, buf
);
1854 /* Stack space for the collecting_t object. */
1856 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1858 /* Build the object. */
1859 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1860 memcpy (buf
+ i
, &tpoint
, 4);
1862 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1864 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1865 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1866 append_insns (&buildaddr
, i
, buf
);
1868 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1869 If we cared for it, this could be using xchg alternatively. */
1872 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1873 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1875 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1877 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1878 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1879 append_insns (&buildaddr
, i
, buf
);
1882 /* Set up arguments to the gdb_collect call. */
1884 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1885 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1886 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1887 append_insns (&buildaddr
, i
, buf
);
1890 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1891 append_insns (&buildaddr
, i
, buf
);
1894 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1895 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1897 append_insns (&buildaddr
, i
, buf
);
1899 buf
[0] = 0xe8; /* call <reladdr> */
1900 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1901 memcpy (buf
+ 1, &offset
, 4);
1902 append_insns (&buildaddr
, 5, buf
);
1903 /* Clean up after the call. */
1904 buf
[0] = 0x83; /* add $0x8,%esp */
1907 append_insns (&buildaddr
, 3, buf
);
1910 /* Clear the spin-lock. This would need the LOCK prefix on older
1913 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1914 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1915 memcpy (buf
+ i
, &lockaddr
, 4);
1917 append_insns (&buildaddr
, i
, buf
);
1920 /* Remove stack that had been used for the collect_t object. */
1922 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1923 append_insns (&buildaddr
, i
, buf
);
1926 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1929 buf
[i
++] = 0x17; /* pop %ss */
1930 buf
[i
++] = 0x0f; /* pop %gs */
1932 buf
[i
++] = 0x0f; /* pop %fs */
1934 buf
[i
++] = 0x07; /* pop %es */
1935 buf
[i
++] = 0x1f; /* pop %ds */
1936 buf
[i
++] = 0x9d; /* popf */
1937 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1940 buf
[i
++] = 0x61; /* popad */
1941 append_insns (&buildaddr
, i
, buf
);
1943 /* Now, adjust the original instruction to execute in the jump
1945 *adjusted_insn_addr
= buildaddr
;
1946 relocate_instruction (&buildaddr
, tpaddr
);
1947 *adjusted_insn_addr_end
= buildaddr
;
1949 /* Write the jump back to the program. */
1950 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1951 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1952 memcpy (buf
+ 1, &offset
, 4);
1953 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1955 /* The jump pad is now built. Wire in a jump to our jump pad. This
1956 is always done last (by our caller actually), so that we can
1957 install fast tracepoints with threads running. This relies on
1958 the agent's atomic write support. */
1961 /* Create a trampoline. */
1962 *trampoline_size
= sizeof (jump_insn
);
1963 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1965 /* No trampoline space available. */
1967 "E.Cannot allocate trampoline space needed for fast "
1968 "tracepoints on 4-byte instructions.");
1972 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1973 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1974 memcpy (buf
+ 1, &offset
, 4);
1975 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1977 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1978 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1979 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1980 memcpy (buf
+ 2, &offset
, 2);
1981 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1982 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1986 /* Else use a 32-bit relative jump instruction. */
1987 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1988 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1989 memcpy (buf
+ 1, &offset
, 4);
1990 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1991 *jjump_pad_insn_size
= sizeof (jump_insn
);
1994 /* Return the end address of our pad. */
1995 *jump_entry
= buildaddr
;
2001 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2002 CORE_ADDR collector
,
2005 CORE_ADDR
*jump_entry
,
2006 CORE_ADDR
*trampoline
,
2007 ULONGEST
*trampoline_size
,
2008 unsigned char *jjump_pad_insn
,
2009 ULONGEST
*jjump_pad_insn_size
,
2010 CORE_ADDR
*adjusted_insn_addr
,
2011 CORE_ADDR
*adjusted_insn_addr_end
,
2015 if (is_64bit_tdesc ())
2016 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2017 collector
, lockaddr
,
2018 orig_size
, jump_entry
,
2019 trampoline
, trampoline_size
,
2021 jjump_pad_insn_size
,
2023 adjusted_insn_addr_end
,
2027 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2028 collector
, lockaddr
,
2029 orig_size
, jump_entry
,
2030 trampoline
, trampoline_size
,
2032 jjump_pad_insn_size
,
2034 adjusted_insn_addr_end
,
2038 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2042 x86_get_min_fast_tracepoint_insn_len (void)
2044 static int warned_about_fast_tracepoints
= 0;
2047 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2048 used for fast tracepoints. */
2049 if (is_64bit_tdesc ())
2053 if (agent_loaded_p ())
2055 char errbuf
[IPA_BUFSIZ
];
2059 /* On x86, if trampolines are available, then 4-byte jump instructions
2060 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2061 with a 4-byte offset are used instead. */
2062 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2066 /* GDB has no channel to explain to user why a shorter fast
2067 tracepoint is not possible, but at least make GDBserver
2068 mention that something has gone awry. */
2069 if (!warned_about_fast_tracepoints
)
2071 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2072 warned_about_fast_tracepoints
= 1;
2079 /* Indicate that the minimum length is currently unknown since the IPA
2080 has not loaded yet. */
2086 add_insns (unsigned char *start
, int len
)
2088 CORE_ADDR buildaddr
= current_insn_ptr
;
2091 debug_printf ("Adding %d bytes of insn at %s\n",
2092 len
, paddress (buildaddr
));
2094 append_insns (&buildaddr
, len
, start
);
2095 current_insn_ptr
= buildaddr
;
2098 /* Our general strategy for emitting code is to avoid specifying raw
2099 bytes whenever possible, and instead copy a block of inline asm
2100 that is embedded in the function. This is a little messy, because
2101 we need to keep the compiler from discarding what looks like dead
2102 code, plus suppress various warnings. */
2104 #define EMIT_ASM(NAME, INSNS) \
2107 extern unsigned char start_ ## NAME, end_ ## NAME; \
2108 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2109 __asm__ ("jmp end_" #NAME "\n" \
2110 "\t" "start_" #NAME ":" \
2112 "\t" "end_" #NAME ":"); \
2117 #define EMIT_ASM32(NAME,INSNS) \
2120 extern unsigned char start_ ## NAME, end_ ## NAME; \
2121 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2122 __asm__ (".code32\n" \
2123 "\t" "jmp end_" #NAME "\n" \
2124 "\t" "start_" #NAME ":\n" \
2126 "\t" "end_" #NAME ":\n" \
2132 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2139 amd64_emit_prologue (void)
2141 EMIT_ASM (amd64_prologue
,
2143 "movq %rsp,%rbp\n\t"
2144 "sub $0x20,%rsp\n\t"
2145 "movq %rdi,-8(%rbp)\n\t"
2146 "movq %rsi,-16(%rbp)");
2151 amd64_emit_epilogue (void)
2153 EMIT_ASM (amd64_epilogue
,
2154 "movq -16(%rbp),%rdi\n\t"
2155 "movq %rax,(%rdi)\n\t"
2162 amd64_emit_add (void)
2164 EMIT_ASM (amd64_add
,
2165 "add (%rsp),%rax\n\t"
2166 "lea 0x8(%rsp),%rsp");
2170 amd64_emit_sub (void)
2172 EMIT_ASM (amd64_sub
,
2173 "sub %rax,(%rsp)\n\t"
2178 amd64_emit_mul (void)
2184 amd64_emit_lsh (void)
2190 amd64_emit_rsh_signed (void)
2196 amd64_emit_rsh_unsigned (void)
2202 amd64_emit_ext (int arg
)
2207 EMIT_ASM (amd64_ext_8
,
2213 EMIT_ASM (amd64_ext_16
,
2218 EMIT_ASM (amd64_ext_32
,
2227 amd64_emit_log_not (void)
2229 EMIT_ASM (amd64_log_not
,
2230 "test %rax,%rax\n\t"
2236 amd64_emit_bit_and (void)
2238 EMIT_ASM (amd64_and
,
2239 "and (%rsp),%rax\n\t"
2240 "lea 0x8(%rsp),%rsp");
2244 amd64_emit_bit_or (void)
2247 "or (%rsp),%rax\n\t"
2248 "lea 0x8(%rsp),%rsp");
2252 amd64_emit_bit_xor (void)
2254 EMIT_ASM (amd64_xor
,
2255 "xor (%rsp),%rax\n\t"
2256 "lea 0x8(%rsp),%rsp");
2260 amd64_emit_bit_not (void)
2262 EMIT_ASM (amd64_bit_not
,
2263 "xorq $0xffffffffffffffff,%rax");
2267 amd64_emit_equal (void)
2269 EMIT_ASM (amd64_equal
,
2270 "cmp %rax,(%rsp)\n\t"
2271 "je .Lamd64_equal_true\n\t"
2273 "jmp .Lamd64_equal_end\n\t"
2274 ".Lamd64_equal_true:\n\t"
2276 ".Lamd64_equal_end:\n\t"
2277 "lea 0x8(%rsp),%rsp");
2281 amd64_emit_less_signed (void)
2283 EMIT_ASM (amd64_less_signed
,
2284 "cmp %rax,(%rsp)\n\t"
2285 "jl .Lamd64_less_signed_true\n\t"
2287 "jmp .Lamd64_less_signed_end\n\t"
2288 ".Lamd64_less_signed_true:\n\t"
2290 ".Lamd64_less_signed_end:\n\t"
2291 "lea 0x8(%rsp),%rsp");
2295 amd64_emit_less_unsigned (void)
2297 EMIT_ASM (amd64_less_unsigned
,
2298 "cmp %rax,(%rsp)\n\t"
2299 "jb .Lamd64_less_unsigned_true\n\t"
2301 "jmp .Lamd64_less_unsigned_end\n\t"
2302 ".Lamd64_less_unsigned_true:\n\t"
2304 ".Lamd64_less_unsigned_end:\n\t"
2305 "lea 0x8(%rsp),%rsp");
2309 amd64_emit_ref (int size
)
2314 EMIT_ASM (amd64_ref1
,
2318 EMIT_ASM (amd64_ref2
,
2322 EMIT_ASM (amd64_ref4
,
2323 "movl (%rax),%eax");
2326 EMIT_ASM (amd64_ref8
,
2327 "movq (%rax),%rax");
2333 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2335 EMIT_ASM (amd64_if_goto
,
2339 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2347 amd64_emit_goto (int *offset_p
, int *size_p
)
2349 EMIT_ASM (amd64_goto
,
2350 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2358 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2360 int diff
= (to
- (from
+ size
));
2361 unsigned char buf
[sizeof (int)];
2369 memcpy (buf
, &diff
, sizeof (int));
2370 write_inferior_memory (from
, buf
, sizeof (int));
2374 amd64_emit_const (LONGEST num
)
2376 unsigned char buf
[16];
2378 CORE_ADDR buildaddr
= current_insn_ptr
;
2381 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2382 memcpy (&buf
[i
], &num
, sizeof (num
));
2384 append_insns (&buildaddr
, i
, buf
);
2385 current_insn_ptr
= buildaddr
;
2389 amd64_emit_call (CORE_ADDR fn
)
2391 unsigned char buf
[16];
2393 CORE_ADDR buildaddr
;
2396 /* The destination function being in the shared library, may be
2397 >31-bits away off the compiled code pad. */
2399 buildaddr
= current_insn_ptr
;
2401 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2405 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2407 /* Offset is too large for a call. Use callq, but that requires
2408 a register, so avoid it if possible. Use r10, since it is
2409 call-clobbered, we don't have to push/pop it. */
2410 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2412 memcpy (buf
+ i
, &fn
, 8);
2414 buf
[i
++] = 0xff; /* callq *%r10 */
2419 int offset32
= offset64
; /* we know we can't overflow here. */
2420 memcpy (buf
+ i
, &offset32
, 4);
2424 append_insns (&buildaddr
, i
, buf
);
2425 current_insn_ptr
= buildaddr
;
2429 amd64_emit_reg (int reg
)
2431 unsigned char buf
[16];
2433 CORE_ADDR buildaddr
;
2435 /* Assume raw_regs is still in %rdi. */
2436 buildaddr
= current_insn_ptr
;
2438 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2439 memcpy (&buf
[i
], ®
, sizeof (reg
));
2441 append_insns (&buildaddr
, i
, buf
);
2442 current_insn_ptr
= buildaddr
;
2443 amd64_emit_call (get_raw_reg_func_addr ());
2447 amd64_emit_pop (void)
2449 EMIT_ASM (amd64_pop
,
2454 amd64_emit_stack_flush (void)
2456 EMIT_ASM (amd64_stack_flush
,
2461 amd64_emit_zero_ext (int arg
)
2466 EMIT_ASM (amd64_zero_ext_8
,
2470 EMIT_ASM (amd64_zero_ext_16
,
2471 "and $0xffff,%rax");
2474 EMIT_ASM (amd64_zero_ext_32
,
2475 "mov $0xffffffff,%rcx\n\t"
2484 amd64_emit_swap (void)
2486 EMIT_ASM (amd64_swap
,
2493 amd64_emit_stack_adjust (int n
)
2495 unsigned char buf
[16];
2497 CORE_ADDR buildaddr
= current_insn_ptr
;
2500 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2504 /* This only handles adjustments up to 16, but we don't expect any more. */
2506 append_insns (&buildaddr
, i
, buf
);
2507 current_insn_ptr
= buildaddr
;
2510 /* FN's prototype is `LONGEST(*fn)(int)'. */
2513 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2515 unsigned char buf
[16];
2517 CORE_ADDR buildaddr
;
2519 buildaddr
= current_insn_ptr
;
2521 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2522 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2524 append_insns (&buildaddr
, i
, buf
);
2525 current_insn_ptr
= buildaddr
;
2526 amd64_emit_call (fn
);
2529 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2532 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2534 unsigned char buf
[16];
2536 CORE_ADDR buildaddr
;
2538 buildaddr
= current_insn_ptr
;
2540 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2541 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2543 append_insns (&buildaddr
, i
, buf
);
2544 current_insn_ptr
= buildaddr
;
2545 EMIT_ASM (amd64_void_call_2_a
,
2546 /* Save away a copy of the stack top. */
2548 /* Also pass top as the second argument. */
2550 amd64_emit_call (fn
);
2551 EMIT_ASM (amd64_void_call_2_b
,
2552 /* Restore the stack top, %rax may have been trashed. */
2557 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2560 "cmp %rax,(%rsp)\n\t"
2561 "jne .Lamd64_eq_fallthru\n\t"
2562 "lea 0x8(%rsp),%rsp\n\t"
2564 /* jmp, but don't trust the assembler to choose the right jump */
2565 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2566 ".Lamd64_eq_fallthru:\n\t"
2567 "lea 0x8(%rsp),%rsp\n\t"
2577 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2580 "cmp %rax,(%rsp)\n\t"
2581 "je .Lamd64_ne_fallthru\n\t"
2582 "lea 0x8(%rsp),%rsp\n\t"
2584 /* jmp, but don't trust the assembler to choose the right jump */
2585 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2586 ".Lamd64_ne_fallthru:\n\t"
2587 "lea 0x8(%rsp),%rsp\n\t"
2597 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2600 "cmp %rax,(%rsp)\n\t"
2601 "jnl .Lamd64_lt_fallthru\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2604 /* jmp, but don't trust the assembler to choose the right jump */
2605 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2606 ".Lamd64_lt_fallthru:\n\t"
2607 "lea 0x8(%rsp),%rsp\n\t"
2617 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2620 "cmp %rax,(%rsp)\n\t"
2621 "jnle .Lamd64_le_fallthru\n\t"
2622 "lea 0x8(%rsp),%rsp\n\t"
2624 /* jmp, but don't trust the assembler to choose the right jump */
2625 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2626 ".Lamd64_le_fallthru:\n\t"
2627 "lea 0x8(%rsp),%rsp\n\t"
2637 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2640 "cmp %rax,(%rsp)\n\t"
2641 "jng .Lamd64_gt_fallthru\n\t"
2642 "lea 0x8(%rsp),%rsp\n\t"
2644 /* jmp, but don't trust the assembler to choose the right jump */
2645 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2646 ".Lamd64_gt_fallthru:\n\t"
2647 "lea 0x8(%rsp),%rsp\n\t"
2657 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2660 "cmp %rax,(%rsp)\n\t"
2661 "jnge .Lamd64_ge_fallthru\n\t"
2662 ".Lamd64_ge_jump:\n\t"
2663 "lea 0x8(%rsp),%rsp\n\t"
2665 /* jmp, but don't trust the assembler to choose the right jump */
2666 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2667 ".Lamd64_ge_fallthru:\n\t"
2668 "lea 0x8(%rsp),%rsp\n\t"
2677 struct emit_ops amd64_emit_ops
=
2679 amd64_emit_prologue
,
2680 amd64_emit_epilogue
,
2685 amd64_emit_rsh_signed
,
2686 amd64_emit_rsh_unsigned
,
2694 amd64_emit_less_signed
,
2695 amd64_emit_less_unsigned
,
2699 amd64_write_goto_address
,
2704 amd64_emit_stack_flush
,
2705 amd64_emit_zero_ext
,
2707 amd64_emit_stack_adjust
,
2708 amd64_emit_int_call_1
,
2709 amd64_emit_void_call_2
,
2718 #endif /* __x86_64__ */
2721 i386_emit_prologue (void)
2723 EMIT_ASM32 (i386_prologue
,
2727 /* At this point, the raw regs base address is at 8(%ebp), and the
2728 value pointer is at 12(%ebp). */
2732 i386_emit_epilogue (void)
2734 EMIT_ASM32 (i386_epilogue
,
2735 "mov 12(%ebp),%ecx\n\t"
2736 "mov %eax,(%ecx)\n\t"
2737 "mov %ebx,0x4(%ecx)\n\t"
2745 i386_emit_add (void)
2747 EMIT_ASM32 (i386_add
,
2748 "add (%esp),%eax\n\t"
2749 "adc 0x4(%esp),%ebx\n\t"
2750 "lea 0x8(%esp),%esp");
2754 i386_emit_sub (void)
2756 EMIT_ASM32 (i386_sub
,
2757 "subl %eax,(%esp)\n\t"
2758 "sbbl %ebx,4(%esp)\n\t"
2764 i386_emit_mul (void)
2770 i386_emit_lsh (void)
2776 i386_emit_rsh_signed (void)
2782 i386_emit_rsh_unsigned (void)
2788 i386_emit_ext (int arg
)
2793 EMIT_ASM32 (i386_ext_8
,
2796 "movl %eax,%ebx\n\t"
2800 EMIT_ASM32 (i386_ext_16
,
2802 "movl %eax,%ebx\n\t"
2806 EMIT_ASM32 (i386_ext_32
,
2807 "movl %eax,%ebx\n\t"
2816 i386_emit_log_not (void)
2818 EMIT_ASM32 (i386_log_not
,
2820 "test %eax,%eax\n\t"
2827 i386_emit_bit_and (void)
2829 EMIT_ASM32 (i386_and
,
2830 "and (%esp),%eax\n\t"
2831 "and 0x4(%esp),%ebx\n\t"
2832 "lea 0x8(%esp),%esp");
2836 i386_emit_bit_or (void)
2838 EMIT_ASM32 (i386_or
,
2839 "or (%esp),%eax\n\t"
2840 "or 0x4(%esp),%ebx\n\t"
2841 "lea 0x8(%esp),%esp");
2845 i386_emit_bit_xor (void)
2847 EMIT_ASM32 (i386_xor
,
2848 "xor (%esp),%eax\n\t"
2849 "xor 0x4(%esp),%ebx\n\t"
2850 "lea 0x8(%esp),%esp");
2854 i386_emit_bit_not (void)
2856 EMIT_ASM32 (i386_bit_not
,
2857 "xor $0xffffffff,%eax\n\t"
2858 "xor $0xffffffff,%ebx\n\t");
2862 i386_emit_equal (void)
2864 EMIT_ASM32 (i386_equal
,
2865 "cmpl %ebx,4(%esp)\n\t"
2866 "jne .Li386_equal_false\n\t"
2867 "cmpl %eax,(%esp)\n\t"
2868 "je .Li386_equal_true\n\t"
2869 ".Li386_equal_false:\n\t"
2871 "jmp .Li386_equal_end\n\t"
2872 ".Li386_equal_true:\n\t"
2874 ".Li386_equal_end:\n\t"
2876 "lea 0x8(%esp),%esp");
2880 i386_emit_less_signed (void)
2882 EMIT_ASM32 (i386_less_signed
,
2883 "cmpl %ebx,4(%esp)\n\t"
2884 "jl .Li386_less_signed_true\n\t"
2885 "jne .Li386_less_signed_false\n\t"
2886 "cmpl %eax,(%esp)\n\t"
2887 "jl .Li386_less_signed_true\n\t"
2888 ".Li386_less_signed_false:\n\t"
2890 "jmp .Li386_less_signed_end\n\t"
2891 ".Li386_less_signed_true:\n\t"
2893 ".Li386_less_signed_end:\n\t"
2895 "lea 0x8(%esp),%esp");
2899 i386_emit_less_unsigned (void)
2901 EMIT_ASM32 (i386_less_unsigned
,
2902 "cmpl %ebx,4(%esp)\n\t"
2903 "jb .Li386_less_unsigned_true\n\t"
2904 "jne .Li386_less_unsigned_false\n\t"
2905 "cmpl %eax,(%esp)\n\t"
2906 "jb .Li386_less_unsigned_true\n\t"
2907 ".Li386_less_unsigned_false:\n\t"
2909 "jmp .Li386_less_unsigned_end\n\t"
2910 ".Li386_less_unsigned_true:\n\t"
2912 ".Li386_less_unsigned_end:\n\t"
2914 "lea 0x8(%esp),%esp");
2918 i386_emit_ref (int size
)
2923 EMIT_ASM32 (i386_ref1
,
2927 EMIT_ASM32 (i386_ref2
,
2931 EMIT_ASM32 (i386_ref4
,
2932 "movl (%eax),%eax");
2935 EMIT_ASM32 (i386_ref8
,
2936 "movl 4(%eax),%ebx\n\t"
2937 "movl (%eax),%eax");
2943 i386_emit_if_goto (int *offset_p
, int *size_p
)
2945 EMIT_ASM32 (i386_if_goto
,
2951 /* Don't trust the assembler to choose the right jump */
2952 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2955 *offset_p
= 11; /* be sure that this matches the sequence above */
2961 i386_emit_goto (int *offset_p
, int *size_p
)
2963 EMIT_ASM32 (i386_goto
,
2964 /* Don't trust the assembler to choose the right jump */
2965 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2973 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2975 int diff
= (to
- (from
+ size
));
2976 unsigned char buf
[sizeof (int)];
2978 /* We're only doing 4-byte sizes at the moment. */
2985 memcpy (buf
, &diff
, sizeof (int));
2986 write_inferior_memory (from
, buf
, sizeof (int));
2990 i386_emit_const (LONGEST num
)
2992 unsigned char buf
[16];
2994 CORE_ADDR buildaddr
= current_insn_ptr
;
2997 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2998 lo
= num
& 0xffffffff;
2999 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3001 hi
= ((num
>> 32) & 0xffffffff);
3004 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3005 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3010 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3012 append_insns (&buildaddr
, i
, buf
);
3013 current_insn_ptr
= buildaddr
;
3017 i386_emit_call (CORE_ADDR fn
)
3019 unsigned char buf
[16];
3021 CORE_ADDR buildaddr
;
3023 buildaddr
= current_insn_ptr
;
3025 buf
[i
++] = 0xe8; /* call <reladdr> */
3026 offset
= ((int) fn
) - (buildaddr
+ 5);
3027 memcpy (buf
+ 1, &offset
, 4);
3028 append_insns (&buildaddr
, 5, buf
);
3029 current_insn_ptr
= buildaddr
;
3033 i386_emit_reg (int reg
)
3035 unsigned char buf
[16];
3037 CORE_ADDR buildaddr
;
3039 EMIT_ASM32 (i386_reg_a
,
3041 buildaddr
= current_insn_ptr
;
3043 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3044 memcpy (&buf
[i
], ®
, sizeof (reg
));
3046 append_insns (&buildaddr
, i
, buf
);
3047 current_insn_ptr
= buildaddr
;
3048 EMIT_ASM32 (i386_reg_b
,
3049 "mov %eax,4(%esp)\n\t"
3050 "mov 8(%ebp),%eax\n\t"
3052 i386_emit_call (get_raw_reg_func_addr ());
3053 EMIT_ASM32 (i386_reg_c
,
3055 "lea 0x8(%esp),%esp");
3059 i386_emit_pop (void)
3061 EMIT_ASM32 (i386_pop
,
3067 i386_emit_stack_flush (void)
3069 EMIT_ASM32 (i386_stack_flush
,
3075 i386_emit_zero_ext (int arg
)
3080 EMIT_ASM32 (i386_zero_ext_8
,
3081 "and $0xff,%eax\n\t"
3085 EMIT_ASM32 (i386_zero_ext_16
,
3086 "and $0xffff,%eax\n\t"
3090 EMIT_ASM32 (i386_zero_ext_32
,
3099 i386_emit_swap (void)
3101 EMIT_ASM32 (i386_swap
,
3111 i386_emit_stack_adjust (int n
)
3113 unsigned char buf
[16];
3115 CORE_ADDR buildaddr
= current_insn_ptr
;
3118 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3122 append_insns (&buildaddr
, i
, buf
);
3123 current_insn_ptr
= buildaddr
;
3126 /* FN's prototype is `LONGEST(*fn)(int)'. */
3129 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3131 unsigned char buf
[16];
3133 CORE_ADDR buildaddr
;
3135 EMIT_ASM32 (i386_int_call_1_a
,
3136 /* Reserve a bit of stack space. */
3138 /* Put the one argument on the stack. */
3139 buildaddr
= current_insn_ptr
;
3141 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3144 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3146 append_insns (&buildaddr
, i
, buf
);
3147 current_insn_ptr
= buildaddr
;
3148 i386_emit_call (fn
);
3149 EMIT_ASM32 (i386_int_call_1_c
,
3151 "lea 0x8(%esp),%esp");
3154 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3157 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3159 unsigned char buf
[16];
3161 CORE_ADDR buildaddr
;
3163 EMIT_ASM32 (i386_void_call_2_a
,
3164 /* Preserve %eax only; we don't have to worry about %ebx. */
3166 /* Reserve a bit of stack space for arguments. */
3167 "sub $0x10,%esp\n\t"
3168 /* Copy "top" to the second argument position. (Note that
3169 we can't assume function won't scribble on its
3170 arguments, so don't try to restore from this.) */
3171 "mov %eax,4(%esp)\n\t"
3172 "mov %ebx,8(%esp)");
3173 /* Put the first argument on the stack. */
3174 buildaddr
= current_insn_ptr
;
3176 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3179 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3181 append_insns (&buildaddr
, i
, buf
);
3182 current_insn_ptr
= buildaddr
;
3183 i386_emit_call (fn
);
3184 EMIT_ASM32 (i386_void_call_2_b
,
3185 "lea 0x10(%esp),%esp\n\t"
3186 /* Restore original stack top. */
3192 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3195 /* Check low half first, more likely to be decider */
3196 "cmpl %eax,(%esp)\n\t"
3197 "jne .Leq_fallthru\n\t"
3198 "cmpl %ebx,4(%esp)\n\t"
3199 "jne .Leq_fallthru\n\t"
3200 "lea 0x8(%esp),%esp\n\t"
3203 /* jmp, but don't trust the assembler to choose the right jump */
3204 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3205 ".Leq_fallthru:\n\t"
3206 "lea 0x8(%esp),%esp\n\t"
3217 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3220 /* Check low half first, more likely to be decider */
3221 "cmpl %eax,(%esp)\n\t"
3223 "cmpl %ebx,4(%esp)\n\t"
3224 "je .Lne_fallthru\n\t"
3226 "lea 0x8(%esp),%esp\n\t"
3229 /* jmp, but don't trust the assembler to choose the right jump */
3230 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3231 ".Lne_fallthru:\n\t"
3232 "lea 0x8(%esp),%esp\n\t"
3243 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3246 "cmpl %ebx,4(%esp)\n\t"
3248 "jne .Llt_fallthru\n\t"
3249 "cmpl %eax,(%esp)\n\t"
3250 "jnl .Llt_fallthru\n\t"
3252 "lea 0x8(%esp),%esp\n\t"
3255 /* jmp, but don't trust the assembler to choose the right jump */
3256 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3257 ".Llt_fallthru:\n\t"
3258 "lea 0x8(%esp),%esp\n\t"
3269 i386_emit_le_goto (int *offset_p
, int *size_p
)
3272 "cmpl %ebx,4(%esp)\n\t"
3274 "jne .Lle_fallthru\n\t"
3275 "cmpl %eax,(%esp)\n\t"
3276 "jnle .Lle_fallthru\n\t"
3278 "lea 0x8(%esp),%esp\n\t"
3281 /* jmp, but don't trust the assembler to choose the right jump */
3282 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3283 ".Lle_fallthru:\n\t"
3284 "lea 0x8(%esp),%esp\n\t"
3295 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3298 "cmpl %ebx,4(%esp)\n\t"
3300 "jne .Lgt_fallthru\n\t"
3301 "cmpl %eax,(%esp)\n\t"
3302 "jng .Lgt_fallthru\n\t"
3304 "lea 0x8(%esp),%esp\n\t"
3307 /* jmp, but don't trust the assembler to choose the right jump */
3308 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3309 ".Lgt_fallthru:\n\t"
3310 "lea 0x8(%esp),%esp\n\t"
3321 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3324 "cmpl %ebx,4(%esp)\n\t"
3326 "jne .Lge_fallthru\n\t"
3327 "cmpl %eax,(%esp)\n\t"
3328 "jnge .Lge_fallthru\n\t"
3330 "lea 0x8(%esp),%esp\n\t"
3333 /* jmp, but don't trust the assembler to choose the right jump */
3334 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3335 ".Lge_fallthru:\n\t"
3336 "lea 0x8(%esp),%esp\n\t"
3346 struct emit_ops i386_emit_ops
=
3354 i386_emit_rsh_signed
,
3355 i386_emit_rsh_unsigned
,
3363 i386_emit_less_signed
,
3364 i386_emit_less_unsigned
,
3368 i386_write_goto_address
,
3373 i386_emit_stack_flush
,
3376 i386_emit_stack_adjust
,
3377 i386_emit_int_call_1
,
3378 i386_emit_void_call_2
,
3388 static struct emit_ops
*
3392 if (is_64bit_tdesc ())
3393 return &amd64_emit_ops
;
3396 return &i386_emit_ops
;
3400 x86_supports_range_stepping (void)
3405 /* This is initialized assuming an amd64 target.
3406 x86_arch_setup will correct it for i386 or amd64 targets. */
3408 struct linux_target_ops the_low_target
=
3411 x86_linux_regs_info
,
3412 x86_cannot_fetch_register
,
3413 x86_cannot_store_register
,
3414 NULL
, /* fetch_register */
3422 x86_supports_z_point_type
,
3425 x86_stopped_by_watchpoint
,
3426 x86_stopped_data_address
,
3427 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3428 native i386 case (no registers smaller than an xfer unit), and are not
3429 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3432 /* need to fix up i386 siginfo if host is amd64 */
3434 x86_linux_new_process
,
3435 x86_linux_new_thread
,
3436 x86_linux_prepare_to_resume
,
3437 x86_linux_process_qsupported
,
3438 x86_supports_tracepoints
,
3439 x86_get_thread_area
,
3440 x86_install_fast_tracepoint_jump_pad
,
3442 x86_get_min_fast_tracepoint_insn_len
,
3443 x86_supports_range_stepping
,
3447 initialize_low_arch (void)
3449 /* Initialize the Linux target descriptions. */
3451 init_registers_amd64_linux ();
3452 init_registers_amd64_avx_linux ();
3453 init_registers_amd64_avx512_linux ();
3454 init_registers_amd64_mpx_linux ();
3456 init_registers_x32_linux ();
3457 init_registers_x32_avx_linux ();
3458 init_registers_x32_avx512_linux ();
3460 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3461 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3462 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3464 init_registers_i386_linux ();
3465 init_registers_i386_mmx_linux ();
3466 init_registers_i386_avx_linux ();
3467 init_registers_i386_avx512_linux ();
3468 init_registers_i386_mpx_linux ();
3470 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3471 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3472 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3474 initialize_regsets_info (&x86_regsets_info
);