/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
static target_desc_up tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

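/* jump_insn is a 5-byte "jmp rel32" (opcode 0xe9); small_jump_insn is
   a 4-byte "jmp rel16" (the 0x66 operand-size prefix plus 0xe9), used
   below when only four bytes may be overwritten at the tracepoint
   address.  The zeroed offset fields are filled in when a jump pad is
   installed.  */
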
/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

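/* In these literal descriptions, the leading '@' tells the tdesc code
   that what follows is the target description XML document itself,
   rather than the name of an annex to read it from.  */
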
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB has
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  21 * 8, 22 * 8,                 /* fs_base, gs_base.  */
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                              /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
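
/* Entries of -1 above mark registers that do not live in the
   elf_gregset_t buffer handled by x86_fill_gregset/x86_store_gregset
   below; those registers are transferred through the floating-point
   and XSAVE regsets instead.  */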

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
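
/* Note: PTRACE_GET_THREAD_AREA fills DESC in with a struct user_desc;
   desc[1] corresponds to its base_addr field, which is all the
   libthread_db callback needs.  */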

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

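    /* The gs value is a segment selector: (index << 3) | TI | RPL.
       Shifting it right by three bits recovers the GDT entry index
       that PTRACE_GET_THREAD_AREA expects.  */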
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

\f
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

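/* The NT_X86_XSTATE entry above is declared with size 0; once the
   kernel's XCR0 value is known, x86_linux_read_description below
   rewrites that size to X86_XSTATE_SIZE (xcr0), or zeroes out the
   non-general regsets entirely when PTRACE_GETREGSET is
   unavailable.  */
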
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

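/* The decrement of 1 above matches the x86 breakpoint instruction:
   after an int3 trap the saved PC points one byte past the 0xCC
   opcode, so it must be moved back by one to re-point at the
   breakpoint address.  */
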
\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
\f
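/* In the FIXUP_X32 case above, the target description is 64-bit but
   the executable is not 64-bit ELF, i.e. the inferior is an x32
   process being debugged by a 64-bit gdbserver; its siginfo layout
   differs from the native 64-bit one only in a few fields, which
   amd64_linux_siginfo_fixup_common adjusts.  */
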
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
   extended state mask, which is the same as the extended control register
   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
   together with the mask saved in the xstate_hdr_bytes to determine what
   states the processor/OS supports and what state, used or initialized,
   the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

PA
853/* Get Linux/x86 target description from running target. */
854
855static const struct target_desc *
856x86_linux_read_description (void)
1570b33e 857{
3aee8918
PA
858 unsigned int machine;
859 int is_elf64;
a196ebeb 860 int xcr0_features;
3aee8918
PA
861 int tid;
862 static uint64_t xcr0;
3a13a53b 863 struct regset_info *regset;
1570b33e 864
0bfdf32f 865 tid = lwpid_of (current_thread);
1570b33e 866
3aee8918 867 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 868
3aee8918 869 if (sizeof (void *) == 4)
3a13a53b 870 {
3aee8918
PA
871 if (is_elf64 > 0)
872 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
873#ifndef __x86_64__
874 else if (machine == EM_X86_64)
875 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
876#endif
877 }
3a13a53b 878
3aee8918
PA
879#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
880 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
881 {
882 elf_fpxregset_t fpxregs;
3a13a53b 883
3aee8918 884 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 885 {
3aee8918
PA
886 have_ptrace_getfpxregs = 0;
887 have_ptrace_getregset = 0;
f49ff000 888 return i386_linux_read_description (X86_XSTATE_X87);
3a13a53b 889 }
3aee8918
PA
890 else
891 have_ptrace_getfpxregs = 1;
3a13a53b 892 }
1570b33e
L
893#endif
894
895 if (!use_xml)
896 {
df7e5265 897 x86_xcr0 = X86_XSTATE_SSE_MASK;
3aee8918 898
1570b33e
L
899 /* Don't use XML. */
900#ifdef __x86_64__
3aee8918 901 if (machine == EM_X86_64)
51a948fd 902 return tdesc_amd64_linux_no_xml.get ();
1570b33e 903 else
1570b33e 904#endif
51a948fd 905 return tdesc_i386_linux_no_xml.get ();
1570b33e
L
906 }
907
1570b33e
L
908 if (have_ptrace_getregset == -1)
909 {
df7e5265 910 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 911 struct iovec iov;
1570b33e
L
912
913 iov.iov_base = xstateregs;
914 iov.iov_len = sizeof (xstateregs);
915
916 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
917 if (ptrace (PTRACE_GETREGSET, tid,
918 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
919 have_ptrace_getregset = 0;
920 else
1570b33e 921 {
3aee8918
PA
922 have_ptrace_getregset = 1;
923
924 /* Get XCR0 from XSAVE extended state. */
925 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
926 / sizeof (uint64_t))];
927
928 /* Use PTRACE_GETREGSET if it is available. */
929 for (regset = x86_regsets;
930 regset->fill_function != NULL; regset++)
931 if (regset->get_request == PTRACE_GETREGSET)
df7e5265 932 regset->size = X86_XSTATE_SIZE (xcr0);
3aee8918
PA
933 else if (regset->type != GENERAL_REGS)
934 regset->size = 0;
1570b33e 935 }
1570b33e
L
936 }
937
3aee8918 938 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb 939 xcr0_features = (have_ptrace_getregset
2e1e43e1 940 && (xcr0 & X86_XSTATE_ALL_MASK));
3aee8918 941
a196ebeb 942 if (xcr0_features)
3aee8918 943 x86_xcr0 = xcr0;
1570b33e 944
3aee8918
PA
945 if (machine == EM_X86_64)
946 {
1570b33e 947#ifdef __x86_64__
b4570e4b 948 const target_desc *tdesc = NULL;
a196ebeb 949
b4570e4b 950 if (xcr0_features)
3aee8918 951 {
b4570e4b
YQ
952 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
953 !is_elf64);
1570b33e 954 }
b4570e4b
YQ
955
956 if (tdesc == NULL)
957 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
958 return tdesc;
3aee8918 959#endif
1570b33e 960 }
3aee8918
PA
961 else
962 {
f49ff000 963 const target_desc *tdesc = NULL;
a1fa17ee 964
f49ff000
YQ
965 if (xcr0_features)
966 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
2b863f51 967
f49ff000
YQ
968 if (tdesc == NULL)
969 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
a196ebeb 970
f49ff000 971 return tdesc;
3aee8918
PA
972 }
973
974 gdb_assert_not_reached ("failed to return tdesc");
975}
976
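/* As an example of the regset sizing above: with XCR0 == 0x7 (x87 |
   SSE | AVX), X86_XSTATE_SIZE (xcr0) is 832 bytes: the 512-byte
   legacy FXSAVE image, the 64-byte XSAVE header, and 256 bytes for
   the upper halves of ymm0..ymm15.  */
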
/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}

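/* For example, a GDB that understands x86 XML target descriptions
   includes "xmlRegisters=i386" (possibly with other architectures in
   a comma-separated list) in its qSupported packet, which the loop
   above detects.  */
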
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the syscall number trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

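/* For example, push_opcode (buf, "48 89 e6") parses the string as a
   sequence of hex byte values, stores 0x48 0x89 0xe6 (mov %rsp,%rsi)
   into BUF, and returns 3.  */
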
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e");	/* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

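/* In outline, the pad emitted above is:

     <jump pad>:  save registers and push TPADDR
		  build the collecting_t {tpoint, thread area} on the stack
		  spin on LOCKADDR until this thread owns the lock
		  call the collector with TPOINT and the saved registers
		  release the lock and restore registers
		  <relocated copy of the original instruction>
		  jmp back to TPADDR + ORIG_SIZE

   while the 5-byte "jmp <jump pad>" returned in JJUMP_PAD_INSN is
   what eventually overwrites the instruction at the tracepoint.  */
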
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

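/* For instance, EMIT_ASM (amd64_pop, "pop %rax") assembles the
   instruction between the labels start_amd64_pop and end_amd64_pop
   inside the enclosing function's body (jumping over it so it is
   never executed in place), and add_insns then copies the machine
   code found between those two labels to current_insn_ptr.  */
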
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

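/* A note on the convention visible in the emitters above: compiled
   agent expressions keep the top of their evaluation stack in %rax
   and the remaining elements on the machine stack, which is why
   e.g. amd64_emit_add combines (%rsp) into %rax and then drops the
   popped slot with lea.  */
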
1864static void
1865amd64_emit_if_goto (int *offset_p, int *size_p)
1866{
1867 EMIT_ASM (amd64_if_goto,
1868 "mov %rax,%rcx\n\t"
1869 "pop %rax\n\t"
1870 "cmp $0,%rcx\n\t"
1871 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1872 if (offset_p)
1873 *offset_p = 10;
1874 if (size_p)
1875 *size_p = 4;
1876}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
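
/* For example (illustrative addresses): a rel32 field at FROM = 0x1000
   with SIZE = 4 that should branch to TO = 0x1010 stores
   TO - (FROM + SIZE) = 0xc, since the displacement is relative to the
   end of the field, i.e. the address of the next instruction.  */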

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
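
/* Note: 0x48 0xb8 is the REX.W-prefixed "mov $imm64,%rax" (movabs)
   encoding, so the full 64-bit constant is copied inline after the
   two opcode bytes, for ten bytes of instruction in total.  */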

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function typically lives in a shared library, so
     it may be more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call; load the address into
         a register and call through it.  The bytes below encode %rdx,
         which is call-clobbered and holds nothing live at this point,
         so we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
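
/* Note: the direct form emits "call <rel32>" (0xe8), whose
   displacement is relative to the instruction following the call,
   i.e. buildaddr + 5; the indirect form is used whenever the target
   lies outside the signed 32-bit range of that displacement.  */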

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
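
/* Note: N counts 8-byte stack slots, and N * 8 is stored in the
   one-byte displacement field of the lea encoding above, so this
   form can only express small adjustments (the signed disp8 tops
   out at 127 bytes).  */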

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}
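
/* Note: both helpers follow the SysV AMD64 calling convention: the
   int argument travels in %edi and, for the two-argument form, the
   bytecode stack top (cached in %rax) is copied into %rsi as the
   LONGEST second argument.  */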

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
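
/* Note: each of the six conditional-goto emitters above compares the
   next-to-top stack entry (at (%rsp)) against the top (cached in
   %rax), drops both operands on either path, and ends the taken path
   with a hand-assembled "jmp <rel32>" whose displacement (13 bytes
   into the sequence) is patched later via amd64_write_goto_address.  */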

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}
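
/* Note: in the 32-bit compiler the 64-bit bytecode stack top is kept
   split across a register pair: %eax holds the low word and %ebx the
   high word (hence the add/adc and sub/sbb pairs below), and the
   epilogue above stores both halves through the value pointer.  */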

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
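
/* Note: the sequence above assembles to 9 bytes (2 for the mov, 2 for
   the or, 1 each for the pops, 3 for the cmp) before the two-byte jne
   opcode, which is how the rel32 displacement ends up at offset 11.  */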

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
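
/* For example (illustrative values): NUM = 0x100000002 loads lo = 2
   into %eax and hi = 1 into %ebx, while NUM = 7 loads %eax with 7
   and takes the shorter xor path, since the high word is zero.  */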

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
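
/* Note: on i386 a direct "call <rel32>" always suffices, since every
   target lies within the 32-bit address space; the displacement is
   computed relative to buildaddr + 5, the address of the instruction
   after the five-byte call.  */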

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}
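
/* Note: these helpers follow the i386 cdecl convention: all arguments
   go on the stack, and a 64-bit return value comes back in the
   %edx:%eax pair, which is why i386_emit_int_call_1 moves %edx into
   %ebx to rebuild the cached stack top.  */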

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
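
/* Note: the 64-bit comparisons above test whichever half is the
   cheaper decider first (the low word for eq/ne, the high word for
   the ordered comparisons) and funnel into a shared jump or fallthru
   tail; *OFFSET_P again gives the byte offset of the zeroed rel32
   inside the emitted sequence, filled in later by the
   write_goto_address hook.  */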

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}