1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
34 #include "tracepoint.h"
38 /* Defined in auto-generated file amd64-linux.c. */
39 void init_registers_amd64_linux (void);
40 extern const struct target_desc
*tdesc_amd64_linux
;
42 /* Defined in auto-generated file amd64-avx-linux.c. */
43 void init_registers_amd64_avx_linux (void);
44 extern const struct target_desc
*tdesc_amd64_avx_linux
;
46 /* Defined in auto-generated file amd64-mpx-linux.c. */
47 void init_registers_amd64_mpx_linux (void);
48 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
50 /* Defined in auto-generated file x32-linux.c. */
51 void init_registers_x32_linux (void);
52 extern const struct target_desc
*tdesc_x32_linux
;
54 /* Defined in auto-generated file x32-avx-linux.c. */
55 void init_registers_x32_avx_linux (void);
56 extern const struct target_desc
*tdesc_x32_avx_linux
;
60 /* Defined in auto-generated file i386-linux.c. */
61 void init_registers_i386_linux (void);
62 extern const struct target_desc
*tdesc_i386_linux
;
64 /* Defined in auto-generated file i386-mmx-linux.c. */
65 void init_registers_i386_mmx_linux (void);
66 extern const struct target_desc
*tdesc_i386_mmx_linux
;
68 /* Defined in auto-generated file i386-avx-linux.c. */
69 void init_registers_i386_avx_linux (void);
70 extern const struct target_desc
*tdesc_i386_avx_linux
;
72 /* Defined in auto-generated file i386-mpx-linux.c. */
73 void init_registers_i386_mpx_linux (void);
74 extern const struct target_desc
*tdesc_i386_mpx_linux
;
77 static struct target_desc
*tdesc_amd64_linux_no_xml
;
79 static struct target_desc
*tdesc_i386_linux_no_xml
;
/* Instruction templates patched into fast-tracepoint jump pads; the
   displacement bytes (zeros here) are filled in at insertion time.
   0xe9 is a jmp with 32-bit relative displacement; the 0x66 prefix
   variant uses a 16-bit displacement.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  Minimal
   hand-written target descriptions sent to old GDBs; the leading '@'
   marks the string as a literal document rather than an annex name.
   The closing tag/terminator was missing in this copy and has been
   restored.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/ptrace.h>

/* Fallback request numbers for kernels/libcs whose headers predate
   these ptrace extensions.  The missing #endif lines for the #ifndef
   guards have been restored.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
131 /* Per-process arch-specific data we want to keep. */
133 struct arch_process_info
135 struct i386_debug_reg_state debug_reg_state
;
138 /* Per-thread arch-specific data we want to keep. */
142 /* Non-zero if our copy differs from what's recorded in the thread. */
143 int debug_registers_changed
;
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.
   (Missing initializer braces restored.)  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* Byte offsets into `struct user' regs for each GDB register number;
   -1 marks registers with no fixed `struct user' slot.
   NOTE(review): the initializer's opening '{' / closing '};' and some
   interior rows (original lines 175 and 177, the latter likely an
   ORIG_RAX entry) are missing from this copy — restore them from the
   upstream file before compiling.  */
static const int x86_64_regmap[] =
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, /* MPX registers BND0 ... BND3.  */
  -1, -1 /* MPX registers BNDCFGU, BNDSTATUS.  */

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
184 #else /* ! __x86_64__ */
186 /* Mapping between the general-purpose registers in `struct user'
187 format and GDB's register array layout. */
188 static /*const*/ int i386_regmap
[] =
190 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
191 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
192 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
193 DS
* 4, ES
* 4, FS
* 4, GS
* 4
196 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
202 /* Returns true if the current inferior belongs to a x86-64 process,
206 is_64bit_tdesc (void)
208 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
210 return register_size (regcache
->tdesc
, 0) == 8;
216 /* Called by libthread_db. */
219 ps_get_thread_area (const struct ps_prochandle
*ph
,
220 lwpid_t lwpid
, int idx
, void **base
)
223 int use_64bit
= is_64bit_tdesc ();
230 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
234 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
245 unsigned int desc
[4];
247 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
248 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
251 /* Ensure we properly extend the value to 64-bits for x86_64. */
252 *base
= (void *) (uintptr_t) desc
[1];
257 /* Get the thread area address. This is used to recognize which
258 thread is which when tracing with the in-process agent library. We
259 don't read anything from the address, and treat it as opaque; it's
260 the address itself that we assume is unique per-thread. */
263 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
266 int use_64bit
= is_64bit_tdesc ();
271 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
273 *addr
= (CORE_ADDR
) (uintptr_t) base
;
282 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
283 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
284 unsigned int desc
[4];
286 const int reg_thread_area
= 3; /* bits to scale down register value. */
289 collect_register_by_name (regcache
, "gs", &gs
);
291 idx
= gs
>> reg_thread_area
;
293 if (ptrace (PTRACE_GET_THREAD_AREA
,
295 (void *) (long) idx
, (unsigned long) &desc
) < 0)
306 x86_cannot_store_register (int regno
)
309 if (is_64bit_tdesc ())
313 return regno
>= I386_NUM_REGS
;
317 x86_cannot_fetch_register (int regno
)
320 if (is_64bit_tdesc ())
324 return regno
>= I386_NUM_REGS
;
328 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
333 if (register_size (regcache
->tdesc
, 0) == 8)
335 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
336 if (x86_64_regmap
[i
] != -1)
337 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
342 for (i
= 0; i
< I386_NUM_REGS
; i
++)
343 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
345 collect_register_by_name (regcache
, "orig_eax",
346 ((char *) buf
) + ORIG_EAX
* 4);
350 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
355 if (register_size (regcache
->tdesc
, 0) == 8)
357 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
358 if (x86_64_regmap
[i
] != -1)
359 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
364 for (i
= 0; i
< I386_NUM_REGS
; i
++)
365 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
367 supply_register_by_name (regcache
, "orig_eax",
368 ((char *) buf
) + ORIG_EAX
* 4);
/* Fill/store helpers for the FP, FPX and XSAVE regsets.  On x86-64
   the FP regset is always fxsave-format; on i386 the plain fsave
   layout is used and fxsave comes through the separate FPX regset.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
419 /* ??? The non-biarch i386 case stores all the i387 regs twice.
420 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
421 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
422 doesn't work. IWBN to avoid the duplication in the case where it
423 does work. Maybe the arch_setup routine could check whether it works
424 and update the supported regsets accordingly. */
426 static struct regset_info x86_regsets
[] =
428 #ifdef HAVE_PTRACE_GETREGS
429 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
431 x86_fill_gregset
, x86_store_gregset
},
432 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
433 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
435 # ifdef HAVE_PTRACE_GETFPXREGS
436 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
438 x86_fill_fpxregset
, x86_store_fpxregset
},
441 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
443 x86_fill_fpregset
, x86_store_fpregset
},
444 #endif /* HAVE_PTRACE_GETREGS */
445 { 0, 0, 0, -1, -1, NULL
, NULL
}
449 x86_get_pc (struct regcache
*regcache
)
451 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
456 collect_register_by_name (regcache
, "rip", &pc
);
457 return (CORE_ADDR
) pc
;
462 collect_register_by_name (regcache
, "eip", &pc
);
463 return (CORE_ADDR
) pc
;
468 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
470 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
474 unsigned long newpc
= pc
;
475 supply_register_by_name (regcache
, "rip", &newpc
);
479 unsigned int newpc
= pc
;
480 supply_register_by_name (regcache
, "eip", &newpc
);
484 static const unsigned char x86_breakpoint
[] = { 0xCC };
485 #define x86_breakpoint_len 1
488 x86_breakpoint_at (CORE_ADDR pc
)
492 (*the_target
->read_memory
) (pc
, &c
, 1);
499 /* Support for debug registers. */
502 x86_linux_dr_get (ptid_t ptid
, int regnum
)
507 tid
= ptid_get_lwp (ptid
);
510 value
= ptrace (PTRACE_PEEKUSER
, tid
,
511 offsetof (struct user
, u_debugreg
[regnum
]), 0);
513 error ("Couldn't read debug register");
519 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
523 tid
= ptid_get_lwp (ptid
);
526 ptrace (PTRACE_POKEUSER
, tid
,
527 offsetof (struct user
, u_debugreg
[regnum
]), value
);
529 error ("Couldn't write debug register");
533 update_debug_registers_callback (struct inferior_list_entry
*entry
,
536 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
537 int pid
= *(int *) pid_p
;
539 /* Only update the threads of this process. */
540 if (pid_of (lwp
) == pid
)
542 /* The actual update is done later just before resuming the lwp,
543 we just mark that the registers need updating. */
544 lwp
->arch_private
->debug_registers_changed
= 1;
546 /* If the lwp isn't stopped, force it to momentarily pause, so
547 we can update its debug registers. */
549 linux_stop_lwp (lwp
);
555 /* Update the inferior's debug register REGNUM from STATE. */
558 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
560 /* Only update the threads of this process. */
561 int pid
= pid_of (get_thread_lwp (current_inferior
));
563 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
564 fatal ("Invalid debug register %d", regnum
);
566 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
569 /* Return the inferior's debug register REGNUM. */
572 i386_dr_low_get_addr (int regnum
)
574 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
575 ptid_t ptid
= ptid_of (lwp
);
577 /* DR6 and DR7 are retrieved with some other way. */
578 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
580 return x86_linux_dr_get (ptid
, regnum
);
583 /* Update the inferior's DR7 debug control register from STATE. */
586 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
588 /* Only update the threads of this process. */
589 int pid
= pid_of (get_thread_lwp (current_inferior
));
591 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
594 /* Return the inferior's DR7 debug control register. */
597 i386_dr_low_get_control (void)
599 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
600 ptid_t ptid
= ptid_of (lwp
);
602 return x86_linux_dr_get (ptid
, DR_CONTROL
);
605 /* Get the value of the DR6 debug status register from the inferior
606 and record it in STATE. */
609 i386_dr_low_get_status (void)
611 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
612 ptid_t ptid
= ptid_of (lwp
);
614 return x86_linux_dr_get (ptid
, DR_STATUS
);
617 /* Breakpoint/Watchpoint support. */
620 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
622 struct process_info
*proc
= current_process ();
625 case '0': /* software-breakpoint */
629 ret
= prepare_to_access_memory ();
632 ret
= set_gdb_breakpoint_at (addr
);
633 done_accessing_memory ();
636 case '1': /* hardware-breakpoint */
637 case '2': /* write watchpoint */
638 case '3': /* read watchpoint */
639 case '4': /* access watchpoint */
640 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
650 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
652 struct process_info
*proc
= current_process ();
655 case '0': /* software-breakpoint */
659 ret
= prepare_to_access_memory ();
662 ret
= delete_gdb_breakpoint_at (addr
);
663 done_accessing_memory ();
666 case '1': /* hardware-breakpoint */
667 case '2': /* write watchpoint */
668 case '3': /* read watchpoint */
669 case '4': /* access watchpoint */
670 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
679 x86_stopped_by_watchpoint (void)
681 struct process_info
*proc
= current_process ();
682 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
686 x86_stopped_data_address (void)
688 struct process_info
*proc
= current_process ();
690 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
696 /* Called when a new process is created. */
698 static struct arch_process_info
*
699 x86_linux_new_process (void)
701 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
703 i386_low_init_dregs (&info
->debug_reg_state
);
708 /* Called when a new thread is detected. */
710 static struct arch_lwp_info
*
711 x86_linux_new_thread (void)
713 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
715 info
->debug_registers_changed
= 1;
720 /* Called when resuming a thread.
721 If the debug regs have changed, update the thread's copies. */
724 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
726 ptid_t ptid
= ptid_of (lwp
);
727 int clear_status
= 0;
729 if (lwp
->arch_private
->debug_registers_changed
)
732 int pid
= ptid_get_pid (ptid
);
733 struct process_info
*proc
= find_process_pid (pid
);
734 struct i386_debug_reg_state
*state
735 = &proc
->private->arch_private
->debug_reg_state
;
737 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
738 if (state
->dr_ref_count
[i
] > 0)
740 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
742 /* If we're setting a watchpoint, any change the inferior
743 had done itself to the debug registers needs to be
744 discarded, otherwise, i386_low_stopped_data_address can
749 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
751 lwp
->arch_private
->debug_registers_changed
= 0;
754 if (clear_status
|| lwp
->stopped_by_watchpoint
)
755 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

/* NOTE(review): the aggregate bodies below have lost their braces and
   most of their members (the _kill/_timer/_rt/_sigfault/_sigpoll
   variants, the _sifields union, typedef names, etc.) in this copy;
   only a subset of the original lines survives.  Restore the complete
   definitions from the upstream file before compiling.  */

struct compat_timeval
  compat_time_t tv_sec;

typedef union compat_sigval
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;

typedef struct compat_siginfo
    int _pad[((128 / sizeof (int)) - 3)];

    /* POSIX.1b timers */
      compat_sigval_t _sigval;

    /* POSIX.1b signals */
      compat_sigval_t _sigval;

      compat_clock_t _utime;
      compat_clock_t _stime;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
    int _pad[((128 / sizeof (int)) - 3)];

    /* POSIX.1b timers */
      compat_sigval_t _sigval;

    /* POSIX.1b signals */
      compat_sigval_t _sigval;

      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
/* Shorthands for the compat_siginfo union members.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  (The #ifndef guards were
   missing in this copy and have been restored so newer glibc's own
   definitions are not clobbered.)  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
930 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
932 memset (to
, 0, sizeof (*to
));
934 to
->si_signo
= from
->si_signo
;
935 to
->si_errno
= from
->si_errno
;
936 to
->si_code
= from
->si_code
;
938 if (to
->si_code
== SI_TIMER
)
940 to
->cpt_si_timerid
= from
->si_timerid
;
941 to
->cpt_si_overrun
= from
->si_overrun
;
942 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
944 else if (to
->si_code
== SI_USER
)
946 to
->cpt_si_pid
= from
->si_pid
;
947 to
->cpt_si_uid
= from
->si_uid
;
949 else if (to
->si_code
< 0)
951 to
->cpt_si_pid
= from
->si_pid
;
952 to
->cpt_si_uid
= from
->si_uid
;
953 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
957 switch (to
->si_signo
)
960 to
->cpt_si_pid
= from
->si_pid
;
961 to
->cpt_si_uid
= from
->si_uid
;
962 to
->cpt_si_status
= from
->si_status
;
963 to
->cpt_si_utime
= from
->si_utime
;
964 to
->cpt_si_stime
= from
->si_stime
;
970 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
973 to
->cpt_si_band
= from
->si_band
;
974 to
->cpt_si_fd
= from
->si_fd
;
977 to
->cpt_si_pid
= from
->si_pid
;
978 to
->cpt_si_uid
= from
->si_uid
;
979 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
986 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
988 memset (to
, 0, sizeof (*to
));
990 to
->si_signo
= from
->si_signo
;
991 to
->si_errno
= from
->si_errno
;
992 to
->si_code
= from
->si_code
;
994 if (to
->si_code
== SI_TIMER
)
996 to
->si_timerid
= from
->cpt_si_timerid
;
997 to
->si_overrun
= from
->cpt_si_overrun
;
998 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1000 else if (to
->si_code
== SI_USER
)
1002 to
->si_pid
= from
->cpt_si_pid
;
1003 to
->si_uid
= from
->cpt_si_uid
;
1005 else if (to
->si_code
< 0)
1007 to
->si_pid
= from
->cpt_si_pid
;
1008 to
->si_uid
= from
->cpt_si_uid
;
1009 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1013 switch (to
->si_signo
)
1016 to
->si_pid
= from
->cpt_si_pid
;
1017 to
->si_uid
= from
->cpt_si_uid
;
1018 to
->si_status
= from
->cpt_si_status
;
1019 to
->si_utime
= from
->cpt_si_utime
;
1020 to
->si_stime
= from
->cpt_si_stime
;
1026 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1029 to
->si_band
= from
->cpt_si_band
;
1030 to
->si_fd
= from
->cpt_si_fd
;
1033 to
->si_pid
= from
->cpt_si_pid
;
1034 to
->si_uid
= from
->cpt_si_uid
;
1035 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1042 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1045 memset (to
, 0, sizeof (*to
));
1047 to
->si_signo
= from
->si_signo
;
1048 to
->si_errno
= from
->si_errno
;
1049 to
->si_code
= from
->si_code
;
1051 if (to
->si_code
== SI_TIMER
)
1053 to
->cpt_si_timerid
= from
->si_timerid
;
1054 to
->cpt_si_overrun
= from
->si_overrun
;
1055 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1057 else if (to
->si_code
== SI_USER
)
1059 to
->cpt_si_pid
= from
->si_pid
;
1060 to
->cpt_si_uid
= from
->si_uid
;
1062 else if (to
->si_code
< 0)
1064 to
->cpt_si_pid
= from
->si_pid
;
1065 to
->cpt_si_uid
= from
->si_uid
;
1066 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1070 switch (to
->si_signo
)
1073 to
->cpt_si_pid
= from
->si_pid
;
1074 to
->cpt_si_uid
= from
->si_uid
;
1075 to
->cpt_si_status
= from
->si_status
;
1076 to
->cpt_si_utime
= from
->si_utime
;
1077 to
->cpt_si_stime
= from
->si_stime
;
1083 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1086 to
->cpt_si_band
= from
->si_band
;
1087 to
->cpt_si_fd
= from
->si_fd
;
1090 to
->cpt_si_pid
= from
->si_pid
;
1091 to
->cpt_si_uid
= from
->si_uid
;
1092 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1099 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1100 compat_x32_siginfo_t
*from
)
1102 memset (to
, 0, sizeof (*to
));
1104 to
->si_signo
= from
->si_signo
;
1105 to
->si_errno
= from
->si_errno
;
1106 to
->si_code
= from
->si_code
;
1108 if (to
->si_code
== SI_TIMER
)
1110 to
->si_timerid
= from
->cpt_si_timerid
;
1111 to
->si_overrun
= from
->cpt_si_overrun
;
1112 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1114 else if (to
->si_code
== SI_USER
)
1116 to
->si_pid
= from
->cpt_si_pid
;
1117 to
->si_uid
= from
->cpt_si_uid
;
1119 else if (to
->si_code
< 0)
1121 to
->si_pid
= from
->cpt_si_pid
;
1122 to
->si_uid
= from
->cpt_si_uid
;
1123 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1127 switch (to
->si_signo
)
1130 to
->si_pid
= from
->cpt_si_pid
;
1131 to
->si_uid
= from
->cpt_si_uid
;
1132 to
->si_status
= from
->cpt_si_status
;
1133 to
->si_utime
= from
->cpt_si_utime
;
1134 to
->si_stime
= from
->cpt_si_stime
;
1140 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1143 to
->si_band
= from
->cpt_si_band
;
1144 to
->si_fd
= from
->cpt_si_fd
;
1147 to
->si_pid
= from
->cpt_si_pid
;
1148 to
->si_uid
= from
->cpt_si_uid
;
1149 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1155 #endif /* __x86_64__ */
1157 /* Convert a native/host siginfo object, into/from the siginfo in the
1158 layout of the inferiors' architecture. Returns true if any
1159 conversion was done; false otherwise. If DIRECTION is 1, then copy
1160 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1164 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1167 unsigned int machine
;
1168 int tid
= lwpid_of (get_thread_lwp (current_inferior
));
1169 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1171 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1172 if (!is_64bit_tdesc ())
1174 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1175 fatal ("unexpected difference in siginfo");
1178 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1180 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1184 /* No fixup for native x32 GDB. */
1185 else if (!is_elf64
&& sizeof (void *) == 8)
1187 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1188 fatal ("unexpected difference in siginfo");
1191 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1194 siginfo_from_compat_x32_siginfo (native
,
1195 (struct compat_x32_siginfo
*) inf
);
/* Format of XSAVE extended state is:
	fxsave_bytes[0..463]
	sw_usable_bytes[464..511]
	xstate_hdr_bytes[512..575]
	(further state components follow)

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
   extended state mask, which is the same as the extended control register
   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
   together with the mask saved in the xstate_hdr_bytes to determine what
   states the processor/OS supports and what state, used or initialized,
   the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "unknown, probe at first use"; the truncated conditional
   initializer has been restored.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 = unknown.  */
static int have_ptrace_getregset = -1;
/* Get Linux/x86 target description from running target.

   NOTE(review): many structural lines are missing from this copy
   (declarations of `tid', `is_elf64', `xcr0_features', `use_xml',
   `struct iovec iov', most braces, several `if'/`else' lines,
   `#ifdef __x86_64__' guards, `break;'s and `default:' labels).
   Restore them from the upstream file before compiling; the notes
   below only describe the surviving fragments.  */

static const struct target_desc *
x86_linux_read_description (void)
  unsigned int machine;
  /* XCR0 is cached across calls; it cannot change for a running CPU.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (get_thread_lwp (current_inferior));

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
      error (_("Can't debug 64-bit process with 32-bit GDBserver"));
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe once whether PTRACE_GETFPXREGS works on this host.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	have_ptrace_getfpxregs = 1;

  x86_xcr0 = I386_XSTATE_SSE_MASK;

  /* Don't use XML.  */
  if (machine == EM_X86_64)
    return tdesc_amd64_linux_no_xml;
  return tdesc_i386_linux_no_xml;

  /* Probe once whether PTRACE_GETREGSET/NT_X86_XSTATE works.  */
  if (have_ptrace_getregset == -1)
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = I386_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & I386_XSTATE_ALL_MASK));

  /* Pick the richest description the inferior's XCR0 supports.  */
  if (machine == EM_X86_64)
      switch (xcr0 & I386_XSTATE_ALL_MASK)
	case I386_XSTATE_MPX_MASK:
	  return tdesc_amd64_mpx_linux;

	case I386_XSTATE_AVX_MASK:
	  return tdesc_amd64_avx_linux;

	  return tdesc_amd64_linux;

	return tdesc_amd64_linux;

      /* x32 (ILP32 ABI on x86-64).  */
      switch (xcr0 & I386_XSTATE_ALL_MASK)
	case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
	case I386_XSTATE_AVX_MASK:
	  return tdesc_x32_avx_linux;

	  return tdesc_x32_linux;

	return tdesc_x32_linux;

      /* 32-bit i386 inferior.  */
      switch (xcr0 & I386_XSTATE_ALL_MASK)
	case (I386_XSTATE_MPX_MASK):
	  return tdesc_i386_mpx_linux;

	case (I386_XSTATE_AVX_MASK):
	  return tdesc_i386_avx_linux;

	  return tdesc_i386_linux;

	return tdesc_i386_linux;

  gdb_assert_not_reached ("failed to return tdesc");
1397 /* Callback for find_inferior. Stops iteration when a thread with a
1398 given PID is found. */
1401 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1403 int pid
= *(int *) data
;
1405 return (ptid_get_pid (entry
->id
) == pid
);
1408 /* Callback for for_each_inferior. Calls the arch_setup routine for
1412 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1414 int pid
= ptid_get_pid (entry
->id
);
1416 /* Look up any thread of this processes. */
1418 = (struct thread_info
*) find_inferior (&all_threads
,
1419 same_process_callback
, &pid
);
1421 the_low_target
.arch_setup ();
1424 /* Update all the target description of all processes; a new GDB
1425 connected, and it may or not support xml target descriptions. */
1428 x86_linux_update_xmltarget (void)
1430 struct thread_info
*save_inferior
= current_inferior
;
1432 /* Before changing the register cache's internal layout, flush the
1433 contents of the current valid caches back to the threads, and
1434 release the current regcache objects. */
1435 regcache_release ();
1437 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1439 current_inferior
= save_inferior
;
1442 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1443 PTRACE_GETREGSET. */
1446 x86_linux_process_qsupported (const char *query
)
1448 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1449 with "i386" in qSupported query, it supports x86 XML target
1452 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1454 char *copy
= xstrdup (query
+ 13);
1457 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1459 if (strcmp (p
, "i386") == 0)
1469 x86_linux_update_xmltarget ();
/* Common for x86/x86-64.  */

/* NOTE(review): the brace pairs and several trailing initializer
   fields of the aggregates below (e.g. the usrregs/regsets pointers
   of the regs_info structs and the fields of i386_linux_usrregs_info)
   are missing from this copy — restore them from the upstream file
   before compiling.  */

static struct regsets_info x86_regsets_info =
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */

static struct regs_info amd64_linux_regs_info =
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */

static struct usrregs_info i386_linux_usrregs_info =

static struct regs_info i386_linux_regs_info =
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,

/* Return the regs_info matching the current inferior's word size.  */
const struct regs_info *
x86_linux_regs_info (void)
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
    return &i386_linux_regs_info;
/* Initialize the target description for the architecture of the
   inferior.  */

/* NOTE(review): return-type lines, braces and most bodies of the
   functions below are missing from this copy — restore them from the
   upstream file before compiling.  */

x86_arch_setup (void)
  current_process ()->tdesc = x86_linux_read_description ();

/* NOTE(review): body missing in this copy.  */
x86_supports_tracepoints (void)

/* Write LEN bytes from BUF into inferior memory at *TO; the line
   advancing *TO afterwards is missing from this copy.  */
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
  write_inferior_memory (*to, buf, len);

/* Parse a space-separated hex opcode string OP into bytes at BUF and
   return the byte count; the loop and store lines are missing from
   this copy.  */
push_opcode (unsigned char *buf, char *op)
  unsigned char *buf_org = buf;

      unsigned long ul = strtoul (op, &endptr, 16);

  return buf - buf_org;
1557 /* Build a jump pad that saves registers and calls a collection
1558 function. Writes a jump instruction to the jump pad to
1559 JJUMPAD_INSN. The caller is responsible to write it in at the
1560 tracepoint address. */
1563 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1564 CORE_ADDR collector
,
1567 CORE_ADDR
*jump_entry
,
1568 CORE_ADDR
*trampoline
,
1569 ULONGEST
*trampoline_size
,
1570 unsigned char *jjump_pad_insn
,
1571 ULONGEST
*jjump_pad_insn_size
,
1572 CORE_ADDR
*adjusted_insn_addr
,
1573 CORE_ADDR
*adjusted_insn_addr_end
,
1576 unsigned char buf
[40];
1580 CORE_ADDR buildaddr
= *jump_entry
;
1582 /* Build the jump pad. */
1584 /* First, do tracepoint data collection. Save registers. */
1586 /* Need to ensure stack pointer saved first. */
1587 buf
[i
++] = 0x54; /* push %rsp */
1588 buf
[i
++] = 0x55; /* push %rbp */
1589 buf
[i
++] = 0x57; /* push %rdi */
1590 buf
[i
++] = 0x56; /* push %rsi */
1591 buf
[i
++] = 0x52; /* push %rdx */
1592 buf
[i
++] = 0x51; /* push %rcx */
1593 buf
[i
++] = 0x53; /* push %rbx */
1594 buf
[i
++] = 0x50; /* push %rax */
1595 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1596 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1597 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1598 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1599 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1600 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1601 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1602 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1603 buf
[i
++] = 0x9c; /* pushfq */
1604 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1606 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1607 i
+= sizeof (unsigned long);
1608 buf
[i
++] = 0x57; /* push %rdi */
1609 append_insns (&buildaddr
, i
, buf
);
1611 /* Stack space for the collecting_t object. */
1613 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1614 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1615 memcpy (buf
+ i
, &tpoint
, 8);
1617 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1618 i
+= push_opcode (&buf
[i
],
1619 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1620 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1621 append_insns (&buildaddr
, i
, buf
);
1625 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1626 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1628 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1629 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1630 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1631 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1632 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1633 append_insns (&buildaddr
, i
, buf
);
1635 /* Set up the gdb_collect call. */
1636 /* At this point, (stack pointer + 0x18) is the base of our saved
1640 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1641 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1643 /* tpoint address may be 64-bit wide. */
1644 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1645 memcpy (buf
+ i
, &tpoint
, 8);
1647 append_insns (&buildaddr
, i
, buf
);
1649 /* The collector function being in the shared library, may be
1650 >31-bits away off the jump pad. */
1652 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1653 memcpy (buf
+ i
, &collector
, 8);
1655 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1656 append_insns (&buildaddr
, i
, buf
);
1658 /* Clear the spin-lock. */
1660 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1661 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1662 memcpy (buf
+ i
, &lockaddr
, 8);
1664 append_insns (&buildaddr
, i
, buf
);
1666 /* Remove stack that had been used for the collect_t object. */
1668 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1669 append_insns (&buildaddr
, i
, buf
);
1671 /* Restore register state. */
1673 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1677 buf
[i
++] = 0x9d; /* popfq */
1678 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1679 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1680 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1681 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1682 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1683 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1684 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1685 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1686 buf
[i
++] = 0x58; /* pop %rax */
1687 buf
[i
++] = 0x5b; /* pop %rbx */
1688 buf
[i
++] = 0x59; /* pop %rcx */
1689 buf
[i
++] = 0x5a; /* pop %rdx */
1690 buf
[i
++] = 0x5e; /* pop %rsi */
1691 buf
[i
++] = 0x5f; /* pop %rdi */
1692 buf
[i
++] = 0x5d; /* pop %rbp */
1693 buf
[i
++] = 0x5c; /* pop %rsp */
1694 append_insns (&buildaddr
, i
, buf
);
1696 /* Now, adjust the original instruction to execute in the jump
1698 *adjusted_insn_addr
= buildaddr
;
1699 relocate_instruction (&buildaddr
, tpaddr
);
1700 *adjusted_insn_addr_end
= buildaddr
;
1702 /* Finally, write a jump back to the program. */
1704 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1705 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1708 "E.Jump back from jump pad too far from tracepoint "
1709 "(offset 0x%" PRIx64
" > int32).", loffset
);
1713 offset
= (int) loffset
;
1714 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1715 memcpy (buf
+ 1, &offset
, 4);
1716 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1718 /* The jump pad is now built. Wire in a jump to our jump pad. This
1719 is always done last (by our caller actually), so that we can
1720 install fast tracepoints with threads running. This relies on
1721 the agent's atomic write support. */
1722 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1723 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1726 "E.Jump pad too far from tracepoint "
1727 "(offset 0x%" PRIx64
" > int32).", loffset
);
1731 offset
= (int) loffset
;
1733 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1734 memcpy (buf
+ 1, &offset
, 4);
1735 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1736 *jjump_pad_insn_size
= sizeof (jump_insn
);
1738 /* Return the end address of our pad. */
1739 *jump_entry
= buildaddr
;
1744 #endif /* __x86_64__ */
1746 /* Build a jump pad that saves registers and calls a collection
1747 function. Writes a jump instruction to the jump pad to
1748 JJUMPAD_INSN. The caller is responsible to write it in at the
1749 tracepoint address. */
1752 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1753 CORE_ADDR collector
,
1756 CORE_ADDR
*jump_entry
,
1757 CORE_ADDR
*trampoline
,
1758 ULONGEST
*trampoline_size
,
1759 unsigned char *jjump_pad_insn
,
1760 ULONGEST
*jjump_pad_insn_size
,
1761 CORE_ADDR
*adjusted_insn_addr
,
1762 CORE_ADDR
*adjusted_insn_addr_end
,
1765 unsigned char buf
[0x100];
1767 CORE_ADDR buildaddr
= *jump_entry
;
1769 /* Build the jump pad. */
1771 /* First, do tracepoint data collection. Save registers. */
1773 buf
[i
++] = 0x60; /* pushad */
1774 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1775 *((int *)(buf
+ i
)) = (int) tpaddr
;
1777 buf
[i
++] = 0x9c; /* pushf */
1778 buf
[i
++] = 0x1e; /* push %ds */
1779 buf
[i
++] = 0x06; /* push %es */
1780 buf
[i
++] = 0x0f; /* push %fs */
1782 buf
[i
++] = 0x0f; /* push %gs */
1784 buf
[i
++] = 0x16; /* push %ss */
1785 buf
[i
++] = 0x0e; /* push %cs */
1786 append_insns (&buildaddr
, i
, buf
);
1788 /* Stack space for the collecting_t object. */
1790 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1792 /* Build the object. */
1793 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1794 memcpy (buf
+ i
, &tpoint
, 4);
1796 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1798 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1799 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1800 append_insns (&buildaddr
, i
, buf
);
1802 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1803 If we cared for it, this could be using xchg alternatively. */
1806 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1807 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1809 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1811 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1812 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1813 append_insns (&buildaddr
, i
, buf
);
1816 /* Set up arguments to the gdb_collect call. */
1818 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1819 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1820 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1821 append_insns (&buildaddr
, i
, buf
);
1824 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1825 append_insns (&buildaddr
, i
, buf
);
1828 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1829 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1831 append_insns (&buildaddr
, i
, buf
);
1833 buf
[0] = 0xe8; /* call <reladdr> */
1834 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1835 memcpy (buf
+ 1, &offset
, 4);
1836 append_insns (&buildaddr
, 5, buf
);
1837 /* Clean up after the call. */
1838 buf
[0] = 0x83; /* add $0x8,%esp */
1841 append_insns (&buildaddr
, 3, buf
);
1844 /* Clear the spin-lock. This would need the LOCK prefix on older
1847 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1848 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1849 memcpy (buf
+ i
, &lockaddr
, 4);
1851 append_insns (&buildaddr
, i
, buf
);
1854 /* Remove stack that had been used for the collect_t object. */
1856 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1857 append_insns (&buildaddr
, i
, buf
);
1860 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1863 buf
[i
++] = 0x17; /* pop %ss */
1864 buf
[i
++] = 0x0f; /* pop %gs */
1866 buf
[i
++] = 0x0f; /* pop %fs */
1868 buf
[i
++] = 0x07; /* pop %es */
1869 buf
[i
++] = 0x1f; /* pop %ds */
1870 buf
[i
++] = 0x9d; /* popf */
1871 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1874 buf
[i
++] = 0x61; /* popad */
1875 append_insns (&buildaddr
, i
, buf
);
1877 /* Now, adjust the original instruction to execute in the jump
1879 *adjusted_insn_addr
= buildaddr
;
1880 relocate_instruction (&buildaddr
, tpaddr
);
1881 *adjusted_insn_addr_end
= buildaddr
;
1883 /* Write the jump back to the program. */
1884 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1885 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1886 memcpy (buf
+ 1, &offset
, 4);
1887 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1889 /* The jump pad is now built. Wire in a jump to our jump pad. This
1890 is always done last (by our caller actually), so that we can
1891 install fast tracepoints with threads running. This relies on
1892 the agent's atomic write support. */
1895 /* Create a trampoline. */
1896 *trampoline_size
= sizeof (jump_insn
);
1897 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1899 /* No trampoline space available. */
1901 "E.Cannot allocate trampoline space needed for fast "
1902 "tracepoints on 4-byte instructions.");
1906 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1907 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1908 memcpy (buf
+ 1, &offset
, 4);
1909 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1911 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1912 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1913 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1914 memcpy (buf
+ 2, &offset
, 2);
1915 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1916 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1920 /* Else use a 32-bit relative jump instruction. */
1921 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1922 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1923 memcpy (buf
+ 1, &offset
, 4);
1924 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1925 *jjump_pad_insn_size
= sizeof (jump_insn
);
1928 /* Return the end address of our pad. */
1929 *jump_entry
= buildaddr
;
1935 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1936 CORE_ADDR collector
,
1939 CORE_ADDR
*jump_entry
,
1940 CORE_ADDR
*trampoline
,
1941 ULONGEST
*trampoline_size
,
1942 unsigned char *jjump_pad_insn
,
1943 ULONGEST
*jjump_pad_insn_size
,
1944 CORE_ADDR
*adjusted_insn_addr
,
1945 CORE_ADDR
*adjusted_insn_addr_end
,
1949 if (is_64bit_tdesc ())
1950 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1951 collector
, lockaddr
,
1952 orig_size
, jump_entry
,
1953 trampoline
, trampoline_size
,
1955 jjump_pad_insn_size
,
1957 adjusted_insn_addr_end
,
1961 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1962 collector
, lockaddr
,
1963 orig_size
, jump_entry
,
1964 trampoline
, trampoline_size
,
1966 jjump_pad_insn_size
,
1968 adjusted_insn_addr_end
,
1972 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1976 x86_get_min_fast_tracepoint_insn_len (void)
1978 static int warned_about_fast_tracepoints
= 0;
1981 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1982 used for fast tracepoints. */
1983 if (is_64bit_tdesc ())
1987 if (agent_loaded_p ())
1989 char errbuf
[IPA_BUFSIZ
];
1993 /* On x86, if trampolines are available, then 4-byte jump instructions
1994 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1995 with a 4-byte offset are used instead. */
1996 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2000 /* GDB has no channel to explain to user why a shorter fast
2001 tracepoint is not possible, but at least make GDBserver
2002 mention that something has gone awry. */
2003 if (!warned_about_fast_tracepoints
)
2005 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2006 warned_about_fast_tracepoints
= 1;
2013 /* Indicate that the minimum length is currently unknown since the IPA
2014 has not loaded yet. */
2020 add_insns (unsigned char *start
, int len
)
2022 CORE_ADDR buildaddr
= current_insn_ptr
;
2025 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
2026 len
, paddress (buildaddr
));
2028 append_insns (&buildaddr
, len
, start
);
2029 current_insn_ptr
= buildaddr
;
2032 /* Our general strategy for emitting code is to avoid specifying raw
2033 bytes whenever possible, and instead copy a block of inline asm
2034 that is embedded in the function. This is a little messy, because
2035 we need to keep the compiler from discarding what looks like dead
2036 code, plus suppress various warnings. */
2038 #define EMIT_ASM(NAME, INSNS) \
2041 extern unsigned char start_ ## NAME, end_ ## NAME; \
2042 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2043 __asm__ ("jmp end_" #NAME "\n" \
2044 "\t" "start_" #NAME ":" \
2046 "\t" "end_" #NAME ":"); \
2051 #define EMIT_ASM32(NAME,INSNS) \
2054 extern unsigned char start_ ## NAME, end_ ## NAME; \
2055 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2056 __asm__ (".code32\n" \
2057 "\t" "jmp end_" #NAME "\n" \
2058 "\t" "start_" #NAME ":\n" \
2060 "\t" "end_" #NAME ":\n" \
2066 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2073 amd64_emit_prologue (void)
2075 EMIT_ASM (amd64_prologue
,
2077 "movq %rsp,%rbp\n\t"
2078 "sub $0x20,%rsp\n\t"
2079 "movq %rdi,-8(%rbp)\n\t"
2080 "movq %rsi,-16(%rbp)");
2085 amd64_emit_epilogue (void)
2087 EMIT_ASM (amd64_epilogue
,
2088 "movq -16(%rbp),%rdi\n\t"
2089 "movq %rax,(%rdi)\n\t"
2096 amd64_emit_add (void)
2098 EMIT_ASM (amd64_add
,
2099 "add (%rsp),%rax\n\t"
2100 "lea 0x8(%rsp),%rsp");
2104 amd64_emit_sub (void)
2106 EMIT_ASM (amd64_sub
,
2107 "sub %rax,(%rsp)\n\t"
2112 amd64_emit_mul (void)
2118 amd64_emit_lsh (void)
2124 amd64_emit_rsh_signed (void)
2130 amd64_emit_rsh_unsigned (void)
2136 amd64_emit_ext (int arg
)
2141 EMIT_ASM (amd64_ext_8
,
2147 EMIT_ASM (amd64_ext_16
,
2152 EMIT_ASM (amd64_ext_32
,
2161 amd64_emit_log_not (void)
2163 EMIT_ASM (amd64_log_not
,
2164 "test %rax,%rax\n\t"
2170 amd64_emit_bit_and (void)
2172 EMIT_ASM (amd64_and
,
2173 "and (%rsp),%rax\n\t"
2174 "lea 0x8(%rsp),%rsp");
2178 amd64_emit_bit_or (void)
2181 "or (%rsp),%rax\n\t"
2182 "lea 0x8(%rsp),%rsp");
2186 amd64_emit_bit_xor (void)
2188 EMIT_ASM (amd64_xor
,
2189 "xor (%rsp),%rax\n\t"
2190 "lea 0x8(%rsp),%rsp");
2194 amd64_emit_bit_not (void)
2196 EMIT_ASM (amd64_bit_not
,
2197 "xorq $0xffffffffffffffff,%rax");
2201 amd64_emit_equal (void)
2203 EMIT_ASM (amd64_equal
,
2204 "cmp %rax,(%rsp)\n\t"
2205 "je .Lamd64_equal_true\n\t"
2207 "jmp .Lamd64_equal_end\n\t"
2208 ".Lamd64_equal_true:\n\t"
2210 ".Lamd64_equal_end:\n\t"
2211 "lea 0x8(%rsp),%rsp");
2215 amd64_emit_less_signed (void)
2217 EMIT_ASM (amd64_less_signed
,
2218 "cmp %rax,(%rsp)\n\t"
2219 "jl .Lamd64_less_signed_true\n\t"
2221 "jmp .Lamd64_less_signed_end\n\t"
2222 ".Lamd64_less_signed_true:\n\t"
2224 ".Lamd64_less_signed_end:\n\t"
2225 "lea 0x8(%rsp),%rsp");
2229 amd64_emit_less_unsigned (void)
2231 EMIT_ASM (amd64_less_unsigned
,
2232 "cmp %rax,(%rsp)\n\t"
2233 "jb .Lamd64_less_unsigned_true\n\t"
2235 "jmp .Lamd64_less_unsigned_end\n\t"
2236 ".Lamd64_less_unsigned_true:\n\t"
2238 ".Lamd64_less_unsigned_end:\n\t"
2239 "lea 0x8(%rsp),%rsp");
2243 amd64_emit_ref (int size
)
2248 EMIT_ASM (amd64_ref1
,
2252 EMIT_ASM (amd64_ref2
,
2256 EMIT_ASM (amd64_ref4
,
2257 "movl (%rax),%eax");
2260 EMIT_ASM (amd64_ref8
,
2261 "movq (%rax),%rax");
2267 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2269 EMIT_ASM (amd64_if_goto
,
2273 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2281 amd64_emit_goto (int *offset_p
, int *size_p
)
2283 EMIT_ASM (amd64_goto
,
2284 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2292 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2294 int diff
= (to
- (from
+ size
));
2295 unsigned char buf
[sizeof (int)];
2303 memcpy (buf
, &diff
, sizeof (int));
2304 write_inferior_memory (from
, buf
, sizeof (int));
2308 amd64_emit_const (LONGEST num
)
2310 unsigned char buf
[16];
2312 CORE_ADDR buildaddr
= current_insn_ptr
;
2315 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2316 memcpy (&buf
[i
], &num
, sizeof (num
));
2318 append_insns (&buildaddr
, i
, buf
);
2319 current_insn_ptr
= buildaddr
;
2323 amd64_emit_call (CORE_ADDR fn
)
2325 unsigned char buf
[16];
2327 CORE_ADDR buildaddr
;
2330 /* The destination function being in the shared library, may be
2331 >31-bits away off the compiled code pad. */
2333 buildaddr
= current_insn_ptr
;
2335 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2339 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2341 /* Offset is too large for a call. Use callq, but that requires
2342 a register, so avoid it if possible. Use r10, since it is
2343 call-clobbered, we don't have to push/pop it. */
2344 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2346 memcpy (buf
+ i
, &fn
, 8);
2348 buf
[i
++] = 0xff; /* callq *%r10 */
2353 int offset32
= offset64
; /* we know we can't overflow here. */
2354 memcpy (buf
+ i
, &offset32
, 4);
2358 append_insns (&buildaddr
, i
, buf
);
2359 current_insn_ptr
= buildaddr
;
2363 amd64_emit_reg (int reg
)
2365 unsigned char buf
[16];
2367 CORE_ADDR buildaddr
;
2369 /* Assume raw_regs is still in %rdi. */
2370 buildaddr
= current_insn_ptr
;
2372 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2373 memcpy (&buf
[i
], ®
, sizeof (reg
));
2375 append_insns (&buildaddr
, i
, buf
);
2376 current_insn_ptr
= buildaddr
;
2377 amd64_emit_call (get_raw_reg_func_addr ());
2381 amd64_emit_pop (void)
2383 EMIT_ASM (amd64_pop
,
2388 amd64_emit_stack_flush (void)
2390 EMIT_ASM (amd64_stack_flush
,
2395 amd64_emit_zero_ext (int arg
)
2400 EMIT_ASM (amd64_zero_ext_8
,
2404 EMIT_ASM (amd64_zero_ext_16
,
2405 "and $0xffff,%rax");
2408 EMIT_ASM (amd64_zero_ext_32
,
2409 "mov $0xffffffff,%rcx\n\t"
2418 amd64_emit_swap (void)
2420 EMIT_ASM (amd64_swap
,
2427 amd64_emit_stack_adjust (int n
)
2429 unsigned char buf
[16];
2431 CORE_ADDR buildaddr
= current_insn_ptr
;
2434 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2438 /* This only handles adjustments up to 16, but we don't expect any more. */
2440 append_insns (&buildaddr
, i
, buf
);
2441 current_insn_ptr
= buildaddr
;
2444 /* FN's prototype is `LONGEST(*fn)(int)'. */
2447 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2449 unsigned char buf
[16];
2451 CORE_ADDR buildaddr
;
2453 buildaddr
= current_insn_ptr
;
2455 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2456 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2458 append_insns (&buildaddr
, i
, buf
);
2459 current_insn_ptr
= buildaddr
;
2460 amd64_emit_call (fn
);
2463 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2466 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2468 unsigned char buf
[16];
2470 CORE_ADDR buildaddr
;
2472 buildaddr
= current_insn_ptr
;
2474 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2475 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2477 append_insns (&buildaddr
, i
, buf
);
2478 current_insn_ptr
= buildaddr
;
2479 EMIT_ASM (amd64_void_call_2_a
,
2480 /* Save away a copy of the stack top. */
2482 /* Also pass top as the second argument. */
2484 amd64_emit_call (fn
);
2485 EMIT_ASM (amd64_void_call_2_b
,
2486 /* Restore the stack top, %rax may have been trashed. */
2491 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2494 "cmp %rax,(%rsp)\n\t"
2495 "jne .Lamd64_eq_fallthru\n\t"
2496 "lea 0x8(%rsp),%rsp\n\t"
2498 /* jmp, but don't trust the assembler to choose the right jump */
2499 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2500 ".Lamd64_eq_fallthru:\n\t"
2501 "lea 0x8(%rsp),%rsp\n\t"
2511 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2514 "cmp %rax,(%rsp)\n\t"
2515 "je .Lamd64_ne_fallthru\n\t"
2516 "lea 0x8(%rsp),%rsp\n\t"
2518 /* jmp, but don't trust the assembler to choose the right jump */
2519 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2520 ".Lamd64_ne_fallthru:\n\t"
2521 "lea 0x8(%rsp),%rsp\n\t"
2531 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2534 "cmp %rax,(%rsp)\n\t"
2535 "jnl .Lamd64_lt_fallthru\n\t"
2536 "lea 0x8(%rsp),%rsp\n\t"
2538 /* jmp, but don't trust the assembler to choose the right jump */
2539 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2540 ".Lamd64_lt_fallthru:\n\t"
2541 "lea 0x8(%rsp),%rsp\n\t"
2551 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2554 "cmp %rax,(%rsp)\n\t"
2555 "jnle .Lamd64_le_fallthru\n\t"
2556 "lea 0x8(%rsp),%rsp\n\t"
2558 /* jmp, but don't trust the assembler to choose the right jump */
2559 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2560 ".Lamd64_le_fallthru:\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2571 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2574 "cmp %rax,(%rsp)\n\t"
2575 "jng .Lamd64_gt_fallthru\n\t"
2576 "lea 0x8(%rsp),%rsp\n\t"
2578 /* jmp, but don't trust the assembler to choose the right jump */
2579 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2580 ".Lamd64_gt_fallthru:\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2591 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2594 "cmp %rax,(%rsp)\n\t"
2595 "jnge .Lamd64_ge_fallthru\n\t"
2596 ".Lamd64_ge_jump:\n\t"
2597 "lea 0x8(%rsp),%rsp\n\t"
2599 /* jmp, but don't trust the assembler to choose the right jump */
2600 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2601 ".Lamd64_ge_fallthru:\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2611 struct emit_ops amd64_emit_ops
=
2613 amd64_emit_prologue
,
2614 amd64_emit_epilogue
,
2619 amd64_emit_rsh_signed
,
2620 amd64_emit_rsh_unsigned
,
2628 amd64_emit_less_signed
,
2629 amd64_emit_less_unsigned
,
2633 amd64_write_goto_address
,
2638 amd64_emit_stack_flush
,
2639 amd64_emit_zero_ext
,
2641 amd64_emit_stack_adjust
,
2642 amd64_emit_int_call_1
,
2643 amd64_emit_void_call_2
,
2652 #endif /* __x86_64__ */
2655 i386_emit_prologue (void)
2657 EMIT_ASM32 (i386_prologue
,
2661 /* At this point, the raw regs base address is at 8(%ebp), and the
2662 value pointer is at 12(%ebp). */
2666 i386_emit_epilogue (void)
2668 EMIT_ASM32 (i386_epilogue
,
2669 "mov 12(%ebp),%ecx\n\t"
2670 "mov %eax,(%ecx)\n\t"
2671 "mov %ebx,0x4(%ecx)\n\t"
2679 i386_emit_add (void)
2681 EMIT_ASM32 (i386_add
,
2682 "add (%esp),%eax\n\t"
2683 "adc 0x4(%esp),%ebx\n\t"
2684 "lea 0x8(%esp),%esp");
2688 i386_emit_sub (void)
2690 EMIT_ASM32 (i386_sub
,
2691 "subl %eax,(%esp)\n\t"
2692 "sbbl %ebx,4(%esp)\n\t"
2698 i386_emit_mul (void)
2704 i386_emit_lsh (void)
2710 i386_emit_rsh_signed (void)
2716 i386_emit_rsh_unsigned (void)
2722 i386_emit_ext (int arg
)
2727 EMIT_ASM32 (i386_ext_8
,
2730 "movl %eax,%ebx\n\t"
2734 EMIT_ASM32 (i386_ext_16
,
2736 "movl %eax,%ebx\n\t"
2740 EMIT_ASM32 (i386_ext_32
,
2741 "movl %eax,%ebx\n\t"
2750 i386_emit_log_not (void)
2752 EMIT_ASM32 (i386_log_not
,
2754 "test %eax,%eax\n\t"
2761 i386_emit_bit_and (void)
2763 EMIT_ASM32 (i386_and
,
2764 "and (%esp),%eax\n\t"
2765 "and 0x4(%esp),%ebx\n\t"
2766 "lea 0x8(%esp),%esp");
2770 i386_emit_bit_or (void)
2772 EMIT_ASM32 (i386_or
,
2773 "or (%esp),%eax\n\t"
2774 "or 0x4(%esp),%ebx\n\t"
2775 "lea 0x8(%esp),%esp");
2779 i386_emit_bit_xor (void)
2781 EMIT_ASM32 (i386_xor
,
2782 "xor (%esp),%eax\n\t"
2783 "xor 0x4(%esp),%ebx\n\t"
2784 "lea 0x8(%esp),%esp");
2788 i386_emit_bit_not (void)
2790 EMIT_ASM32 (i386_bit_not
,
2791 "xor $0xffffffff,%eax\n\t"
2792 "xor $0xffffffff,%ebx\n\t");
2796 i386_emit_equal (void)
2798 EMIT_ASM32 (i386_equal
,
2799 "cmpl %ebx,4(%esp)\n\t"
2800 "jne .Li386_equal_false\n\t"
2801 "cmpl %eax,(%esp)\n\t"
2802 "je .Li386_equal_true\n\t"
2803 ".Li386_equal_false:\n\t"
2805 "jmp .Li386_equal_end\n\t"
2806 ".Li386_equal_true:\n\t"
2808 ".Li386_equal_end:\n\t"
2810 "lea 0x8(%esp),%esp");
2814 i386_emit_less_signed (void)
2816 EMIT_ASM32 (i386_less_signed
,
2817 "cmpl %ebx,4(%esp)\n\t"
2818 "jl .Li386_less_signed_true\n\t"
2819 "jne .Li386_less_signed_false\n\t"
2820 "cmpl %eax,(%esp)\n\t"
2821 "jl .Li386_less_signed_true\n\t"
2822 ".Li386_less_signed_false:\n\t"
2824 "jmp .Li386_less_signed_end\n\t"
2825 ".Li386_less_signed_true:\n\t"
2827 ".Li386_less_signed_end:\n\t"
2829 "lea 0x8(%esp),%esp");
2833 i386_emit_less_unsigned (void)
2835 EMIT_ASM32 (i386_less_unsigned
,
2836 "cmpl %ebx,4(%esp)\n\t"
2837 "jb .Li386_less_unsigned_true\n\t"
2838 "jne .Li386_less_unsigned_false\n\t"
2839 "cmpl %eax,(%esp)\n\t"
2840 "jb .Li386_less_unsigned_true\n\t"
2841 ".Li386_less_unsigned_false:\n\t"
2843 "jmp .Li386_less_unsigned_end\n\t"
2844 ".Li386_less_unsigned_true:\n\t"
2846 ".Li386_less_unsigned_end:\n\t"
2848 "lea 0x8(%esp),%esp");
2852 i386_emit_ref (int size
)
2857 EMIT_ASM32 (i386_ref1
,
2861 EMIT_ASM32 (i386_ref2
,
2865 EMIT_ASM32 (i386_ref4
,
2866 "movl (%eax),%eax");
2869 EMIT_ASM32 (i386_ref8
,
2870 "movl 4(%eax),%ebx\n\t"
2871 "movl (%eax),%eax");
2877 i386_emit_if_goto (int *offset_p
, int *size_p
)
2879 EMIT_ASM32 (i386_if_goto
,
2885 /* Don't trust the assembler to choose the right jump */
2886 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2889 *offset_p
= 11; /* be sure that this matches the sequence above */
2895 i386_emit_goto (int *offset_p
, int *size_p
)
2897 EMIT_ASM32 (i386_goto
,
2898 /* Don't trust the assembler to choose the right jump */
2899 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2907 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2909 int diff
= (to
- (from
+ size
));
2910 unsigned char buf
[sizeof (int)];
2912 /* We're only doing 4-byte sizes at the moment. */
2919 memcpy (buf
, &diff
, sizeof (int));
2920 write_inferior_memory (from
, buf
, sizeof (int));
2924 i386_emit_const (LONGEST num
)
2926 unsigned char buf
[16];
2928 CORE_ADDR buildaddr
= current_insn_ptr
;
2931 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2932 lo
= num
& 0xffffffff;
2933 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2935 hi
= ((num
>> 32) & 0xffffffff);
2938 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2939 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2944 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2946 append_insns (&buildaddr
, i
, buf
);
2947 current_insn_ptr
= buildaddr
;
2951 i386_emit_call (CORE_ADDR fn
)
2953 unsigned char buf
[16];
2955 CORE_ADDR buildaddr
;
2957 buildaddr
= current_insn_ptr
;
2959 buf
[i
++] = 0xe8; /* call <reladdr> */
2960 offset
= ((int) fn
) - (buildaddr
+ 5);
2961 memcpy (buf
+ 1, &offset
, 4);
2962 append_insns (&buildaddr
, 5, buf
);
2963 current_insn_ptr
= buildaddr
;
2967 i386_emit_reg (int reg
)
2969 unsigned char buf
[16];
2971 CORE_ADDR buildaddr
;
2973 EMIT_ASM32 (i386_reg_a
,
2975 buildaddr
= current_insn_ptr
;
2977 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2978 memcpy (&buf
[i
], ®
, sizeof (reg
));
2980 append_insns (&buildaddr
, i
, buf
);
2981 current_insn_ptr
= buildaddr
;
2982 EMIT_ASM32 (i386_reg_b
,
2983 "mov %eax,4(%esp)\n\t"
2984 "mov 8(%ebp),%eax\n\t"
2986 i386_emit_call (get_raw_reg_func_addr ());
2987 EMIT_ASM32 (i386_reg_c
,
2989 "lea 0x8(%esp),%esp");
2993 i386_emit_pop (void)
2995 EMIT_ASM32 (i386_pop
,
3001 i386_emit_stack_flush (void)
3003 EMIT_ASM32 (i386_stack_flush
,
3009 i386_emit_zero_ext (int arg
)
3014 EMIT_ASM32 (i386_zero_ext_8
,
3015 "and $0xff,%eax\n\t"
3019 EMIT_ASM32 (i386_zero_ext_16
,
3020 "and $0xffff,%eax\n\t"
3024 EMIT_ASM32 (i386_zero_ext_32
,
3033 i386_emit_swap (void)
3035 EMIT_ASM32 (i386_swap
,
3045 i386_emit_stack_adjust (int n
)
3047 unsigned char buf
[16];
3049 CORE_ADDR buildaddr
= current_insn_ptr
;
3052 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3056 append_insns (&buildaddr
, i
, buf
);
3057 current_insn_ptr
= buildaddr
;
3060 /* FN's prototype is `LONGEST(*fn)(int)'. */
3063 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3065 unsigned char buf
[16];
3067 CORE_ADDR buildaddr
;
3069 EMIT_ASM32 (i386_int_call_1_a
,
3070 /* Reserve a bit of stack space. */
3072 /* Put the one argument on the stack. */
3073 buildaddr
= current_insn_ptr
;
3075 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3078 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3080 append_insns (&buildaddr
, i
, buf
);
3081 current_insn_ptr
= buildaddr
;
3082 i386_emit_call (fn
);
3083 EMIT_ASM32 (i386_int_call_1_c
,
3085 "lea 0x8(%esp),%esp");
3088 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3091 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3093 unsigned char buf
[16];
3095 CORE_ADDR buildaddr
;
3097 EMIT_ASM32 (i386_void_call_2_a
,
3098 /* Preserve %eax only; we don't have to worry about %ebx. */
3100 /* Reserve a bit of stack space for arguments. */
3101 "sub $0x10,%esp\n\t"
3102 /* Copy "top" to the second argument position. (Note that
3103 we can't assume function won't scribble on its
3104 arguments, so don't try to restore from this.) */
3105 "mov %eax,4(%esp)\n\t"
3106 "mov %ebx,8(%esp)");
3107 /* Put the first argument on the stack. */
3108 buildaddr
= current_insn_ptr
;
3110 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3113 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3115 append_insns (&buildaddr
, i
, buf
);
3116 current_insn_ptr
= buildaddr
;
3117 i386_emit_call (fn
);
3118 EMIT_ASM32 (i386_void_call_2_b
,
3119 "lea 0x10(%esp),%esp\n\t"
3120 /* Restore original stack top. */
3126 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3129 /* Check low half first, more likely to be decider */
3130 "cmpl %eax,(%esp)\n\t"
3131 "jne .Leq_fallthru\n\t"
3132 "cmpl %ebx,4(%esp)\n\t"
3133 "jne .Leq_fallthru\n\t"
3134 "lea 0x8(%esp),%esp\n\t"
3137 /* jmp, but don't trust the assembler to choose the right jump */
3138 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3139 ".Leq_fallthru:\n\t"
3140 "lea 0x8(%esp),%esp\n\t"
3151 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3154 /* Check low half first, more likely to be decider */
3155 "cmpl %eax,(%esp)\n\t"
3157 "cmpl %ebx,4(%esp)\n\t"
3158 "je .Lne_fallthru\n\t"
3160 "lea 0x8(%esp),%esp\n\t"
3163 /* jmp, but don't trust the assembler to choose the right jump */
3164 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3165 ".Lne_fallthru:\n\t"
3166 "lea 0x8(%esp),%esp\n\t"
3177 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3180 "cmpl %ebx,4(%esp)\n\t"
3182 "jne .Llt_fallthru\n\t"
3183 "cmpl %eax,(%esp)\n\t"
3184 "jnl .Llt_fallthru\n\t"
3186 "lea 0x8(%esp),%esp\n\t"
3189 /* jmp, but don't trust the assembler to choose the right jump */
3190 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3191 ".Llt_fallthru:\n\t"
3192 "lea 0x8(%esp),%esp\n\t"
3203 i386_emit_le_goto (int *offset_p
, int *size_p
)
3206 "cmpl %ebx,4(%esp)\n\t"
3208 "jne .Lle_fallthru\n\t"
3209 "cmpl %eax,(%esp)\n\t"
3210 "jnle .Lle_fallthru\n\t"
3212 "lea 0x8(%esp),%esp\n\t"
3215 /* jmp, but don't trust the assembler to choose the right jump */
3216 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3217 ".Lle_fallthru:\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
3229 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3232 "cmpl %ebx,4(%esp)\n\t"
3234 "jne .Lgt_fallthru\n\t"
3235 "cmpl %eax,(%esp)\n\t"
3236 "jng .Lgt_fallthru\n\t"
3238 "lea 0x8(%esp),%esp\n\t"
3241 /* jmp, but don't trust the assembler to choose the right jump */
3242 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3243 ".Lgt_fallthru:\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
3255 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3258 "cmpl %ebx,4(%esp)\n\t"
3260 "jne .Lge_fallthru\n\t"
3261 "cmpl %eax,(%esp)\n\t"
3262 "jnge .Lge_fallthru\n\t"
3264 "lea 0x8(%esp),%esp\n\t"
3267 /* jmp, but don't trust the assembler to choose the right jump */
3268 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3269 ".Lge_fallthru:\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
3280 struct emit_ops i386_emit_ops
=
3288 i386_emit_rsh_signed
,
3289 i386_emit_rsh_unsigned
,
3297 i386_emit_less_signed
,
3298 i386_emit_less_unsigned
,
3302 i386_write_goto_address
,
3307 i386_emit_stack_flush
,
3310 i386_emit_stack_adjust
,
3311 i386_emit_int_call_1
,
3312 i386_emit_void_call_2
,
3322 static struct emit_ops
*
3326 if (is_64bit_tdesc ())
3327 return &amd64_emit_ops
;
3330 return &i386_emit_ops
;
/* Implementation of linux_target_ops method "supports_range_stepping".
   Range stepping (vCont;r) is always available on x86 GNU/Linux.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3339 /* This is initialized assuming an amd64 target.
3340 x86_arch_setup will correct it for i386 or amd64 targets. */
3342 struct linux_target_ops the_low_target
=
3345 x86_linux_regs_info
,
3346 x86_cannot_fetch_register
,
3347 x86_cannot_store_register
,
3348 NULL
, /* fetch_register */
3358 x86_stopped_by_watchpoint
,
3359 x86_stopped_data_address
,
3360 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3361 native i386 case (no registers smaller than an xfer unit), and are not
3362 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3365 /* need to fix up i386 siginfo if host is amd64 */
3367 x86_linux_new_process
,
3368 x86_linux_new_thread
,
3369 x86_linux_prepare_to_resume
,
3370 x86_linux_process_qsupported
,
3371 x86_supports_tracepoints
,
3372 x86_get_thread_area
,
3373 x86_install_fast_tracepoint_jump_pad
,
3375 x86_get_min_fast_tracepoint_insn_len
,
3376 x86_supports_range_stepping
,
3380 initialize_low_arch (void)
3382 /* Initialize the Linux target descriptions. */
3384 init_registers_amd64_linux ();
3385 init_registers_amd64_avx_linux ();
3386 init_registers_amd64_mpx_linux ();
3388 init_registers_x32_linux ();
3389 init_registers_x32_avx_linux ();
3391 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3392 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3393 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3395 init_registers_i386_linux ();
3396 init_registers_i386_mmx_linux ();
3397 init_registers_i386_avx_linux ();
3398 init_registers_i386_mpx_linux ();
3400 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3401 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3402 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3404 initialize_regsets_info (&x86_regsets_info
);