/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

private:

  /* Update the target descriptions of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				  /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

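/* Collect all general-purpose registers from REGCACHE into BUF, laid
   out as the ptrace general-purpose register set.  */
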
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

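/* Supply the ptrace general-purpose register set in BUF to REGCACHE.  */
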
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

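/* Move the floating-point register state between REGCACHE and BUF:
   the fxsave layout is used on amd64, the older fsave layout on
   i386.  */
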
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

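/* The x86 software breakpoint instruction, 'int3' (0xCC).  */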
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

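/* Insert a hardware breakpoint or watchpoint of the given TYPE at
   ADDR by updating the process's debug register mirror.  Other point
   types are unsupported here and yield a non-zero result.  */
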
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target descriptions of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

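/* Write the LEN bytes in BUF to the inferior's memory at *TO, and
   advance *TO past the written bytes.  */
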
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

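/* Parse OP, a string of whitespace-separated hex byte values, into
   BUF.  Returns the number of bytes written.  */
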
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

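/* Append LEN bytes of machine code starting at START to the code
   being built at current_insn_ptr, and advance current_insn_ptr.  */
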
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

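/* Emit code that sets up the stack frame for compiled agent code:
   save the caller's %rbp, reserve scratch space, and store the two
   incoming argument registers (%rdi, %rsi) into the frame.  */
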
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

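/* Emit code that stores the computed value in %rax through the
   pointer that was passed as the second argument (saved at
   -16(%rbp) by the prologue), clears %rax, and returns.  */
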
static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

1712static void
1713amd64_emit_add (void)
1714{
1715 EMIT_ASM (amd64_add,
1716 "add (%rsp),%rax\n\t"
1717 "lea 0x8(%rsp),%rsp");
1718}
1719
1720static void
1721amd64_emit_sub (void)
1722{
1723 EMIT_ASM (amd64_sub,
1724 "sub %rax,(%rsp)\n\t"
1725 "pop %rax");
1726}
1727
1728static void
1729amd64_emit_mul (void)
1730{
1731 emit_error = 1;
1732}
1733
1734static void
1735amd64_emit_lsh (void)
1736{
1737 emit_error = 1;
1738}
1739
1740static void
1741amd64_emit_rsh_signed (void)
1742{
1743 emit_error = 1;
1744}
1745
1746static void
1747amd64_emit_rsh_unsigned (void)
1748{
1749 emit_error = 1;
1750}
1751
1752static void
1753amd64_emit_ext (int arg)
1754{
1755 switch (arg)
1756 {
1757 case 8:
1758 EMIT_ASM (amd64_ext_8,
1759 "cbtw\n\t"
1760 "cwtl\n\t"
1761 "cltq");
1762 break;
1763 case 16:
1764 EMIT_ASM (amd64_ext_16,
1765 "cwtl\n\t"
1766 "cltq");
1767 break;
1768 case 32:
1769 EMIT_ASM (amd64_ext_32,
1770 "cltq");
1771 break;
1772 default:
1773 emit_error = 1;
1774 }
1775}
1776
1777static void
1778amd64_emit_log_not (void)
1779{
1780 EMIT_ASM (amd64_log_not,
1781 "test %rax,%rax\n\t"
1782 "sete %cl\n\t"
1783 "movzbq %cl,%rax");
1784}
1785
1786static void
1787amd64_emit_bit_and (void)
1788{
1789 EMIT_ASM (amd64_and,
1790 "and (%rsp),%rax\n\t"
1791 "lea 0x8(%rsp),%rsp");
1792}
1793
1794static void
1795amd64_emit_bit_or (void)
1796{
1797 EMIT_ASM (amd64_or,
1798 "or (%rsp),%rax\n\t"
1799 "lea 0x8(%rsp),%rsp");
1800}
1801
1802static void
1803amd64_emit_bit_xor (void)
1804{
1805 EMIT_ASM (amd64_xor,
1806 "xor (%rsp),%rax\n\t"
1807 "lea 0x8(%rsp),%rsp");
1808}
1809
1810static void
1811amd64_emit_bit_not (void)
1812{
1813 EMIT_ASM (amd64_bit_not,
1814 "xorq $0xffffffffffffffff,%rax");
1815}
1816
1817static void
1818amd64_emit_equal (void)
1819{
1820 EMIT_ASM (amd64_equal,
1821 "cmp %rax,(%rsp)\n\t"
1822 "je .Lamd64_equal_true\n\t"
1823 "xor %rax,%rax\n\t"
1824 "jmp .Lamd64_equal_end\n\t"
1825 ".Lamd64_equal_true:\n\t"
1826 "mov $0x1,%rax\n\t"
1827 ".Lamd64_equal_end:\n\t"
1828 "lea 0x8(%rsp),%rsp");
1829}
1830
1831static void
1832amd64_emit_less_signed (void)
1833{
1834 EMIT_ASM (amd64_less_signed,
1835 "cmp %rax,(%rsp)\n\t"
1836 "jl .Lamd64_less_signed_true\n\t"
1837 "xor %rax,%rax\n\t"
1838 "jmp .Lamd64_less_signed_end\n\t"
1839 ".Lamd64_less_signed_true:\n\t"
1840 "mov $1,%rax\n\t"
1841 ".Lamd64_less_signed_end:\n\t"
1842 "lea 0x8(%rsp),%rsp");
1843}
1844
1845static void
1846amd64_emit_less_unsigned (void)
1847{
1848 EMIT_ASM (amd64_less_unsigned,
1849 "cmp %rax,(%rsp)\n\t"
1850 "jb .Lamd64_less_unsigned_true\n\t"
1851 "xor %rax,%rax\n\t"
1852 "jmp .Lamd64_less_unsigned_end\n\t"
1853 ".Lamd64_less_unsigned_true:\n\t"
1854 "mov $1,%rax\n\t"
1855 ".Lamd64_less_unsigned_end:\n\t"
1856 "lea 0x8(%rsp),%rsp");
1857}
1858
1859static void
1860amd64_emit_ref (int size)
1861{
1862 switch (size)
1863 {
1864 case 1:
1865 EMIT_ASM (amd64_ref1,
1866 "movb (%rax),%al");
1867 break;
1868 case 2:
1869 EMIT_ASM (amd64_ref2,
1870 "movw (%rax),%ax");
1871 break;
1872 case 4:
1873 EMIT_ASM (amd64_ref4,
1874 "movl (%rax),%eax");
1875 break;
1876 case 8:
1877 EMIT_ASM (amd64_ref8,
1878 "movq (%rax),%rax");
1879 break;
1880 }
1881}
1882
1883static void
1884amd64_emit_if_goto (int *offset_p, int *size_p)
1885{
1886 EMIT_ASM (amd64_if_goto,
1887 "mov %rax,%rcx\n\t"
1888 "pop %rax\n\t"
1889 "cmp $0,%rcx\n\t"
1890 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1891 if (offset_p)
1892 *offset_p = 10;
1893 if (size_p)
1894 *size_p = 4;
1895}
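/* Why *offset_p is 10, assuming the standard encodings: mov %rax,%rcx
   is 3 bytes (48 89 c1), pop %rax is 1 (58), cmp $0,%rcx is 4
   (48 83 f9 00), and the hand-coded jne opcode is 2 (0f 85); the
   4-byte rel32 displacement therefore starts 10 bytes in, which is
   what amd64_write_goto_address patches later.  */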
1896
1897static void
1898amd64_emit_goto (int *offset_p, int *size_p)
1899{
1900 EMIT_ASM (amd64_goto,
1901 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1902 if (offset_p)
1903 *offset_p = 1;
1904 if (size_p)
1905 *size_p = 4;
1906}
1907
1908static void
1909amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1910{
1911 int diff = (to - (from + size));
1912 unsigned char buf[sizeof (int)];
1913
1914 if (size != 4)
1915 {
1916 emit_error = 1;
1917 return;
1918 }
1919
1920 memcpy (buf, &diff, sizeof (int));
1921	  target_write_memory (from, buf, sizeof (int));
1922}
1923
1924static void
1925amd64_emit_const (LONGEST num)
1926{
1927 unsigned char buf[16];
1928 int i;
1929 CORE_ADDR buildaddr = current_insn_ptr;
1930
1931 i = 0;
1932 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1933  memcpy (&buf[i], &num, sizeof (num));
1934 i += 8;
1935 append_insns (&buildaddr, i, buf);
1936 current_insn_ptr = buildaddr;
1937}
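/* A worked example under the usual movabs encoding: emit_const
   (0x1234) appends the 10 bytes 48 b8 34 12 00 00 00 00 00 00,
   i.e. "movabs $0x1234,%rax", leaving the constant in the
   top-of-stack register.  */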
1938
1939static void
1940amd64_emit_call (CORE_ADDR fn)
1941{
1942 unsigned char buf[16];
1943 int i;
1944 CORE_ADDR buildaddr;
1945  LONGEST offset64;
1946
1947  /* The destination function, being in a shared library, may be
1948     more than 31 bits away from the compiled code pad.  */
1949
1950 buildaddr = current_insn_ptr;
1951
1952 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1953
1954 i = 0;
1955
1956 if (offset64 > INT_MAX || offset64 < INT_MIN)
1957 {
1958      /* Offset is too large for a direct call.  Call through a
1959	 register instead.  %rdx (the register the bytes below encode)
1960	 is call-clobbered and carries no argument here, so we don't
1961	 have to push/pop it.  */
1961      buf[i++] = 0x48; /* mov $fn,%rdx */
1962      buf[i++] = 0xba;
1963      memcpy (buf + i, &fn, 8);
1964      i += 8;
1965      buf[i++] = 0xff; /* callq *%rdx */
1966      buf[i++] = 0xd2;
1967 }
1968 else
1969 {
1970 int offset32 = offset64; /* we know we can't overflow here. */
1971
1972 buf[i++] = 0xe8; /* call <reladdr> */
1973 memcpy (buf + i, &offset32, 4);
1974 i += 4;
1975 }
1976
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979}
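/* The displacement test above follows the usual rel32 rule: a direct
   call reaches at most INT_MAX bytes forward or INT_MIN back from the
   end of the 5-byte instruction, so a code pad and a library function
   more than 2 GiB apart must take the mov-plus-indirect-call path.  */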
1980
1981static void
1982amd64_emit_reg (int reg)
1983{
1984 unsigned char buf[16];
1985 int i;
1986 CORE_ADDR buildaddr;
1987
1988 /* Assume raw_regs is still in %rdi. */
1989 buildaddr = current_insn_ptr;
1990 i = 0;
1991 buf[i++] = 0xbe; /* mov $<n>,%esi */
1992  memcpy (&buf[i], &reg, sizeof (reg));
1993 i += 4;
1994 append_insns (&buildaddr, i, buf);
1995 current_insn_ptr = buildaddr;
1996 amd64_emit_call (get_raw_reg_func_addr ());
1997}
1998
1999static void
2000amd64_emit_pop (void)
2001{
2002 EMIT_ASM (amd64_pop,
2003 "pop %rax");
2004}
2005
2006static void
2007amd64_emit_stack_flush (void)
2008{
2009 EMIT_ASM (amd64_stack_flush,
2010 "push %rax");
2011}
2012
2013static void
2014amd64_emit_zero_ext (int arg)
2015{
2016 switch (arg)
2017 {
2018 case 8:
2019 EMIT_ASM (amd64_zero_ext_8,
2020 "and $0xff,%rax");
2021 break;
2022 case 16:
2023 EMIT_ASM (amd64_zero_ext_16,
2024 "and $0xffff,%rax");
2025 break;
2026 case 32:
2027 EMIT_ASM (amd64_zero_ext_32,
2028 "mov $0xffffffff,%rcx\n\t"
2029 "and %rcx,%rax");
2030 break;
2031 default:
2032 emit_error = 1;
2033 }
2034}
2035
2036static void
2037amd64_emit_swap (void)
2038{
2039 EMIT_ASM (amd64_swap,
2040 "mov %rax,%rcx\n\t"
2041 "pop %rax\n\t"
2042 "push %rcx");
2043}
2044
2045static void
2046amd64_emit_stack_adjust (int n)
2047{
2048 unsigned char buf[16];
2049 int i;
2050 CORE_ADDR buildaddr = current_insn_ptr;
2051
2052 i = 0;
2053 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2054 buf[i++] = 0x8d;
2055 buf[i++] = 0x64;
2056 buf[i++] = 0x24;
2057 /* This only handles adjustments up to 16, but we don't expect any more. */
2058 buf[i++] = n * 8;
2059 append_insns (&buildaddr, i, buf);
2060 current_insn_ptr = buildaddr;
2061}
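/* Sanity check on the encoding, assuming signed disp8 semantics:
   48 8d 64 24 XX is "lea XX(%rsp),%rsp", so the single displacement
   byte holds n * 8 and stays meaningful only for n <= 15 (120 bytes);
   the "up to 16" in the comment above is optimistic by one, though
   callers never get near the limit either way.  */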
2062
2063/* FN's prototype is `LONGEST(*fn)(int)'. */
2064
2065static void
2066amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2067{
2068 unsigned char buf[16];
2069 int i;
2070 CORE_ADDR buildaddr;
2071
2072 buildaddr = current_insn_ptr;
2073 i = 0;
2074 buf[i++] = 0xbf; /* movl $<n>,%edi */
2075  memcpy (&buf[i], &arg1, sizeof (arg1));
2076 i += 4;
2077 append_insns (&buildaddr, i, buf);
2078 current_insn_ptr = buildaddr;
2079 amd64_emit_call (fn);
2080}
2081
2082/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2083
2084static void
2085amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2086{
2087 unsigned char buf[16];
2088 int i;
2089 CORE_ADDR buildaddr;
2090
2091 buildaddr = current_insn_ptr;
2092 i = 0;
2093 buf[i++] = 0xbf; /* movl $<n>,%edi */
2094  memcpy (&buf[i], &arg1, sizeof (arg1));
2095 i += 4;
2096 append_insns (&buildaddr, i, buf);
2097 current_insn_ptr = buildaddr;
2098 EMIT_ASM (amd64_void_call_2_a,
2099 /* Save away a copy of the stack top. */
2100 "push %rax\n\t"
2101 /* Also pass top as the second argument. */
2102 "mov %rax,%rsi");
2103 amd64_emit_call (fn);
2104 EMIT_ASM (amd64_void_call_2_b,
2105 /* Restore the stack top, %rax may have been trashed. */
2106 "pop %rax");
2107}
2108
2109static void
2110amd64_emit_eq_goto (int *offset_p, int *size_p)
2111{
2112 EMIT_ASM (amd64_eq,
2113 "cmp %rax,(%rsp)\n\t"
2114 "jne .Lamd64_eq_fallthru\n\t"
2115 "lea 0x8(%rsp),%rsp\n\t"
2116 "pop %rax\n\t"
2117 /* jmp, but don't trust the assembler to choose the right jump */
2118 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2119 ".Lamd64_eq_fallthru:\n\t"
2120 "lea 0x8(%rsp),%rsp\n\t"
2121 "pop %rax");
2122
2123 if (offset_p)
2124 *offset_p = 13;
2125 if (size_p)
2126 *size_p = 4;
2127}
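/* Why *offset_p is 13 here and in the five conditional gotos that
   follow, assuming the standard encodings: cmp %rax,(%rsp) is 4 bytes,
   jne rel8 is 2, lea 0x8(%rsp),%rsp is 5, pop %rax is 1, and the 0xe9
   opcode adds 1 more, so the rel32 displacement begins at byte 13.  */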
2128
2129static void
2130amd64_emit_ne_goto (int *offset_p, int *size_p)
2131{
2132 EMIT_ASM (amd64_ne,
2133 "cmp %rax,(%rsp)\n\t"
2134 "je .Lamd64_ne_fallthru\n\t"
2135 "lea 0x8(%rsp),%rsp\n\t"
2136 "pop %rax\n\t"
2137 /* jmp, but don't trust the assembler to choose the right jump */
2138 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2139 ".Lamd64_ne_fallthru:\n\t"
2140 "lea 0x8(%rsp),%rsp\n\t"
2141 "pop %rax");
2142
2143 if (offset_p)
2144 *offset_p = 13;
2145 if (size_p)
2146 *size_p = 4;
2147}
2148
2149static void
2150amd64_emit_lt_goto (int *offset_p, int *size_p)
2151{
2152 EMIT_ASM (amd64_lt,
2153 "cmp %rax,(%rsp)\n\t"
2154 "jnl .Lamd64_lt_fallthru\n\t"
2155 "lea 0x8(%rsp),%rsp\n\t"
2156 "pop %rax\n\t"
2157 /* jmp, but don't trust the assembler to choose the right jump */
2158 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2159 ".Lamd64_lt_fallthru:\n\t"
2160 "lea 0x8(%rsp),%rsp\n\t"
2161 "pop %rax");
2162
2163 if (offset_p)
2164 *offset_p = 13;
2165 if (size_p)
2166 *size_p = 4;
2167}
2168
2169static void
2170amd64_emit_le_goto (int *offset_p, int *size_p)
2171{
2172 EMIT_ASM (amd64_le,
2173 "cmp %rax,(%rsp)\n\t"
2174 "jnle .Lamd64_le_fallthru\n\t"
2175 "lea 0x8(%rsp),%rsp\n\t"
2176 "pop %rax\n\t"
2177 /* jmp, but don't trust the assembler to choose the right jump */
2178 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2179 ".Lamd64_le_fallthru:\n\t"
2180 "lea 0x8(%rsp),%rsp\n\t"
2181 "pop %rax");
2182
2183 if (offset_p)
2184 *offset_p = 13;
2185 if (size_p)
2186 *size_p = 4;
2187}
2188
2189static void
2190amd64_emit_gt_goto (int *offset_p, int *size_p)
2191{
2192 EMIT_ASM (amd64_gt,
2193 "cmp %rax,(%rsp)\n\t"
2194 "jng .Lamd64_gt_fallthru\n\t"
2195 "lea 0x8(%rsp),%rsp\n\t"
2196 "pop %rax\n\t"
2197 /* jmp, but don't trust the assembler to choose the right jump */
2198 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2199 ".Lamd64_gt_fallthru:\n\t"
2200 "lea 0x8(%rsp),%rsp\n\t"
2201 "pop %rax");
2202
2203 if (offset_p)
2204 *offset_p = 13;
2205 if (size_p)
2206 *size_p = 4;
2207}
2208
2209static void
2210amd64_emit_ge_goto (int *offset_p, int *size_p)
2211{
2212 EMIT_ASM (amd64_ge,
2213 "cmp %rax,(%rsp)\n\t"
2214 "jnge .Lamd64_ge_fallthru\n\t"
2215 ".Lamd64_ge_jump:\n\t"
2216 "lea 0x8(%rsp),%rsp\n\t"
2217 "pop %rax\n\t"
2218 /* jmp, but don't trust the assembler to choose the right jump */
2219 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2220 ".Lamd64_ge_fallthru:\n\t"
2221 "lea 0x8(%rsp),%rsp\n\t"
2222 "pop %rax");
2223
2224 if (offset_p)
2225 *offset_p = 13;
2226 if (size_p)
2227 *size_p = 4;
2228}
2229
2230struct emit_ops amd64_emit_ops =
2231 {
2232 amd64_emit_prologue,
2233 amd64_emit_epilogue,
2234 amd64_emit_add,
2235 amd64_emit_sub,
2236 amd64_emit_mul,
2237 amd64_emit_lsh,
2238 amd64_emit_rsh_signed,
2239 amd64_emit_rsh_unsigned,
2240 amd64_emit_ext,
2241 amd64_emit_log_not,
2242 amd64_emit_bit_and,
2243 amd64_emit_bit_or,
2244 amd64_emit_bit_xor,
2245 amd64_emit_bit_not,
2246 amd64_emit_equal,
2247 amd64_emit_less_signed,
2248 amd64_emit_less_unsigned,
2249 amd64_emit_ref,
2250 amd64_emit_if_goto,
2251 amd64_emit_goto,
2252 amd64_write_goto_address,
2253 amd64_emit_const,
2254 amd64_emit_call,
2255 amd64_emit_reg,
2256 amd64_emit_pop,
2257 amd64_emit_stack_flush,
2258 amd64_emit_zero_ext,
2259 amd64_emit_swap,
2260 amd64_emit_stack_adjust,
2261 amd64_emit_int_call_1,
2262 amd64_emit_void_call_2,
2263 amd64_emit_eq_goto,
2264 amd64_emit_ne_goto,
2265 amd64_emit_lt_goto,
2266 amd64_emit_le_goto,
2267 amd64_emit_gt_goto,
2268 amd64_emit_ge_goto
2269 };
2270
2271#endif /* __x86_64__ */
2272
2273static void
2274i386_emit_prologue (void)
2275{
2276 EMIT_ASM32 (i386_prologue,
2277 "push %ebp\n\t"
2278 "mov %esp,%ebp\n\t"
2279 "push %ebx");
2280 /* At this point, the raw regs base address is at 8(%ebp), and the
2281 value pointer is at 12(%ebp). */
2282}
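/* Throughout the 32-bit emitter the 64-bit top-of-stack value is
   split across a register pair: %eax holds the low word and %ebx the
   high word, and each in-memory stack slot is 8 bytes, hence the
   paired push/pop and the add/adc and sub/sbb sequences below.  */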
2283
2284static void
2285i386_emit_epilogue (void)
2286{
2287 EMIT_ASM32 (i386_epilogue,
2288 "mov 12(%ebp),%ecx\n\t"
2289 "mov %eax,(%ecx)\n\t"
2290 "mov %ebx,0x4(%ecx)\n\t"
2291 "xor %eax,%eax\n\t"
2292	     "pop %ebx\n\t"
2293 "pop %ebp\n\t"
2294 "ret");
2295}
2296
2297static void
2298i386_emit_add (void)
2299{
2300 EMIT_ASM32 (i386_add,
2301 "add (%esp),%eax\n\t"
2302 "adc 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2304}
2305
2306static void
2307i386_emit_sub (void)
2308{
2309 EMIT_ASM32 (i386_sub,
2310 "subl %eax,(%esp)\n\t"
2311 "sbbl %ebx,4(%esp)\n\t"
2312 "pop %eax\n\t"
2313 "pop %ebx\n\t");
2314}
2315
2316static void
2317i386_emit_mul (void)
2318{
2319 emit_error = 1;
2320}
2321
2322static void
2323i386_emit_lsh (void)
2324{
2325 emit_error = 1;
2326}
2327
2328static void
2329i386_emit_rsh_signed (void)
2330{
2331 emit_error = 1;
2332}
2333
2334static void
2335i386_emit_rsh_unsigned (void)
2336{
2337 emit_error = 1;
2338}
2339
2340static void
2341i386_emit_ext (int arg)
2342{
2343 switch (arg)
2344 {
2345 case 8:
2346 EMIT_ASM32 (i386_ext_8,
2347 "cbtw\n\t"
2348 "cwtl\n\t"
2349 "movl %eax,%ebx\n\t"
2350 "sarl $31,%ebx");
2351 break;
2352 case 16:
2353 EMIT_ASM32 (i386_ext_16,
2354 "cwtl\n\t"
2355 "movl %eax,%ebx\n\t"
2356 "sarl $31,%ebx");
2357 break;
2358 case 32:
2359 EMIT_ASM32 (i386_ext_32,
2360 "movl %eax,%ebx\n\t"
2361 "sarl $31,%ebx");
2362 break;
2363 default:
2364 emit_error = 1;
2365 }
2366}
2367
2368static void
2369i386_emit_log_not (void)
2370{
2371 EMIT_ASM32 (i386_log_not,
2372 "or %ebx,%eax\n\t"
2373 "test %eax,%eax\n\t"
2374 "sete %cl\n\t"
2375 "xor %ebx,%ebx\n\t"
2376 "movzbl %cl,%eax");
2377}
2378
2379static void
2380i386_emit_bit_and (void)
2381{
2382 EMIT_ASM32 (i386_and,
2383 "and (%esp),%eax\n\t"
2384 "and 0x4(%esp),%ebx\n\t"
2385 "lea 0x8(%esp),%esp");
2386}
2387
2388static void
2389i386_emit_bit_or (void)
2390{
2391 EMIT_ASM32 (i386_or,
2392 "or (%esp),%eax\n\t"
2393 "or 0x4(%esp),%ebx\n\t"
2394 "lea 0x8(%esp),%esp");
2395}
2396
2397static void
2398i386_emit_bit_xor (void)
2399{
2400 EMIT_ASM32 (i386_xor,
2401 "xor (%esp),%eax\n\t"
2402 "xor 0x4(%esp),%ebx\n\t"
2403 "lea 0x8(%esp),%esp");
2404}
2405
2406static void
2407i386_emit_bit_not (void)
2408{
2409 EMIT_ASM32 (i386_bit_not,
2410 "xor $0xffffffff,%eax\n\t"
2411 "xor $0xffffffff,%ebx\n\t");
2412}
2413
2414static void
2415i386_emit_equal (void)
2416{
2417 EMIT_ASM32 (i386_equal,
2418 "cmpl %ebx,4(%esp)\n\t"
2419 "jne .Li386_equal_false\n\t"
2420 "cmpl %eax,(%esp)\n\t"
2421 "je .Li386_equal_true\n\t"
2422 ".Li386_equal_false:\n\t"
2423 "xor %eax,%eax\n\t"
2424 "jmp .Li386_equal_end\n\t"
2425 ".Li386_equal_true:\n\t"
2426 "mov $1,%eax\n\t"
2427 ".Li386_equal_end:\n\t"
2428 "xor %ebx,%ebx\n\t"
2429 "lea 0x8(%esp),%esp");
2430}
2431
2432static void
2433i386_emit_less_signed (void)
2434{
2435 EMIT_ASM32 (i386_less_signed,
2436 "cmpl %ebx,4(%esp)\n\t"
2437 "jl .Li386_less_signed_true\n\t"
2438 "jne .Li386_less_signed_false\n\t"
2439 "cmpl %eax,(%esp)\n\t"
2440 "jl .Li386_less_signed_true\n\t"
2441 ".Li386_less_signed_false:\n\t"
2442 "xor %eax,%eax\n\t"
2443 "jmp .Li386_less_signed_end\n\t"
2444 ".Li386_less_signed_true:\n\t"
2445 "mov $1,%eax\n\t"
2446 ".Li386_less_signed_end:\n\t"
2447 "xor %ebx,%ebx\n\t"
2448 "lea 0x8(%esp),%esp");
2449}
2450
2451static void
2452i386_emit_less_unsigned (void)
2453{
2454 EMIT_ASM32 (i386_less_unsigned,
2455 "cmpl %ebx,4(%esp)\n\t"
2456 "jb .Li386_less_unsigned_true\n\t"
2457 "jne .Li386_less_unsigned_false\n\t"
2458 "cmpl %eax,(%esp)\n\t"
2459 "jb .Li386_less_unsigned_true\n\t"
2460 ".Li386_less_unsigned_false:\n\t"
2461 "xor %eax,%eax\n\t"
2462 "jmp .Li386_less_unsigned_end\n\t"
2463 ".Li386_less_unsigned_true:\n\t"
2464 "mov $1,%eax\n\t"
2465 ".Li386_less_unsigned_end:\n\t"
2466 "xor %ebx,%ebx\n\t"
2467 "lea 0x8(%esp),%esp");
2468}
2469
2470static void
2471i386_emit_ref (int size)
2472{
2473 switch (size)
2474 {
2475 case 1:
2476 EMIT_ASM32 (i386_ref1,
2477 "movb (%eax),%al");
2478 break;
2479 case 2:
2480 EMIT_ASM32 (i386_ref2,
2481 "movw (%eax),%ax");
2482 break;
2483 case 4:
2484 EMIT_ASM32 (i386_ref4,
2485 "movl (%eax),%eax");
2486 break;
2487 case 8:
2488 EMIT_ASM32 (i386_ref8,
2489 "movl 4(%eax),%ebx\n\t"
2490 "movl (%eax),%eax");
2491 break;
2492 }
2493}
2494
2495static void
2496i386_emit_if_goto (int *offset_p, int *size_p)
2497{
2498 EMIT_ASM32 (i386_if_goto,
2499 "mov %eax,%ecx\n\t"
2500 "or %ebx,%ecx\n\t"
2501 "pop %eax\n\t"
2502 "pop %ebx\n\t"
2503 "cmpl $0,%ecx\n\t"
2504 /* Don't trust the assembler to choose the right jump */
2505 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2506
2507 if (offset_p)
2508 *offset_p = 11; /* be sure that this matches the sequence above */
2509 if (size_p)
2510 *size_p = 4;
2511}
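/* Why *offset_p is 11, assuming the standard encodings: mov %eax,%ecx
   (2 bytes) + or %ebx,%ecx (2) + pop %eax (1) + pop %ebx (1) +
   cmpl $0,%ecx (3) + the two-byte 0f 85 opcode = 11 bytes before the
   rel32 displacement.  */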
2512
2513static void
2514i386_emit_goto (int *offset_p, int *size_p)
2515{
2516 EMIT_ASM32 (i386_goto,
2517 /* Don't trust the assembler to choose the right jump */
2518 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2519 if (offset_p)
2520 *offset_p = 1;
2521 if (size_p)
2522 *size_p = 4;
2523}
2524
2525static void
2526i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2527{
2528 int diff = (to - (from + size));
2529 unsigned char buf[sizeof (int)];
2530
2531 /* We're only doing 4-byte sizes at the moment. */
2532 if (size != 4)
2533 {
2534 emit_error = 1;
2535 return;
2536 }
2537
2538 memcpy (buf, &diff, sizeof (int));
2539  target_write_memory (from, buf, sizeof (int));
2540}
2541
2542static void
2543i386_emit_const (LONGEST num)
2544{
2545 unsigned char buf[16];
2546  int i, hi, lo;
2547 CORE_ADDR buildaddr = current_insn_ptr;
2548
2549 i = 0;
2550 buf[i++] = 0xb8; /* mov $<n>,%eax */
2551 lo = num & 0xffffffff;
2552 memcpy (&buf[i], &lo, sizeof (lo));
2553 i += 4;
2554 hi = ((num >> 32) & 0xffffffff);
2555 if (hi)
2556 {
2557 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2558      memcpy (&buf[i], &hi, sizeof (hi));
2559 i += 4;
2560 }
2561 else
2562 {
2563 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2564 }
2565 append_insns (&buildaddr, i, buf);
2566 current_insn_ptr = buildaddr;
2567}
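/* A worked example: for num = 0x100000000 the low word is 0 and the
   high word 1, so this emits "mov $0,%eax" then "mov $1,%ebx"; for a
   num that fits in 32 bits the high word is cleared with the shorter
   "xor %ebx,%ebx" instead.  */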
2568
2569static void
2570i386_emit_call (CORE_ADDR fn)
2571{
2572 unsigned char buf[16];
2573 int i, offset;
2574 CORE_ADDR buildaddr;
2575
2576 buildaddr = current_insn_ptr;
2577 i = 0;
2578 buf[i++] = 0xe8; /* call <reladdr> */
2579 offset = ((int) fn) - (buildaddr + 5);
2580 memcpy (buf + 1, &offset, 4);
2581 append_insns (&buildaddr, 5, buf);
2582 current_insn_ptr = buildaddr;
2583}
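/* The rel32 displacement is measured from the end of the 5-byte call:
   e.g. with buildaddr 0x1000 and fn 0x2000 the offset is 0x2000 -
   0x1005 = 0xffb, emitted little-endian as e8 fb 0f 00 00.  Unlike
   the amd64 case there is no range problem, since every i386 target
   lies within 32 bits.  */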
2584
2585static void
2586i386_emit_reg (int reg)
2587{
2588 unsigned char buf[16];
2589 int i;
2590 CORE_ADDR buildaddr;
2591
2592 EMIT_ASM32 (i386_reg_a,
2593 "sub $0x8,%esp");
2594 buildaddr = current_insn_ptr;
2595 i = 0;
2596 buf[i++] = 0xb8; /* mov $<n>,%eax */
2597  memcpy (&buf[i], &reg, sizeof (reg));
2598 i += 4;
2599 append_insns (&buildaddr, i, buf);
2600 current_insn_ptr = buildaddr;
2601 EMIT_ASM32 (i386_reg_b,
2602 "mov %eax,4(%esp)\n\t"
2603 "mov 8(%ebp),%eax\n\t"
2604 "mov %eax,(%esp)");
2605 i386_emit_call (get_raw_reg_func_addr ());
2606 EMIT_ASM32 (i386_reg_c,
2607 "xor %ebx,%ebx\n\t"
2608 "lea 0x8(%esp),%esp");
2609}
2610
2611static void
2612i386_emit_pop (void)
2613{
2614 EMIT_ASM32 (i386_pop,
2615 "pop %eax\n\t"
2616 "pop %ebx");
2617}
2618
2619static void
2620i386_emit_stack_flush (void)
2621{
2622 EMIT_ASM32 (i386_stack_flush,
2623 "push %ebx\n\t"
2624 "push %eax");
2625}
2626
2627static void
2628i386_emit_zero_ext (int arg)
2629{
2630 switch (arg)
2631 {
2632 case 8:
2633 EMIT_ASM32 (i386_zero_ext_8,
2634 "and $0xff,%eax\n\t"
2635 "xor %ebx,%ebx");
2636 break;
2637 case 16:
2638 EMIT_ASM32 (i386_zero_ext_16,
2639 "and $0xffff,%eax\n\t"
2640 "xor %ebx,%ebx");
2641 break;
2642 case 32:
2643 EMIT_ASM32 (i386_zero_ext_32,
2644 "xor %ebx,%ebx");
2645 break;
2646 default:
2647 emit_error = 1;
2648 }
2649}
2650
2651static void
2652i386_emit_swap (void)
2653{
2654 EMIT_ASM32 (i386_swap,
2655 "mov %eax,%ecx\n\t"
2656 "mov %ebx,%edx\n\t"
2657 "pop %eax\n\t"
2658 "pop %ebx\n\t"
2659 "push %edx\n\t"
2660 "push %ecx");
2661}
2662
2663static void
2664i386_emit_stack_adjust (int n)
2665{
2666 unsigned char buf[16];
2667 int i;
2668 CORE_ADDR buildaddr = current_insn_ptr;
2669
2670 i = 0;
2671 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2672 buf[i++] = 0x64;
2673 buf[i++] = 0x24;
2674 buf[i++] = n * 8;
2675 append_insns (&buildaddr, i, buf);
2676 current_insn_ptr = buildaddr;
2677}
2678
2679/* FN's prototype is `LONGEST(*fn)(int)'. */
2680
2681static void
2682i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2683{
2684 unsigned char buf[16];
2685 int i;
2686 CORE_ADDR buildaddr;
2687
2688 EMIT_ASM32 (i386_int_call_1_a,
2689 /* Reserve a bit of stack space. */
2690 "sub $0x8,%esp");
2691 /* Put the one argument on the stack. */
2692 buildaddr = current_insn_ptr;
2693 i = 0;
2694 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2695 buf[i++] = 0x04;
2696 buf[i++] = 0x24;
2697  memcpy (&buf[i], &arg1, sizeof (arg1));
2698 i += 4;
2699 append_insns (&buildaddr, i, buf);
2700 current_insn_ptr = buildaddr;
2701 i386_emit_call (fn);
2702 EMIT_ASM32 (i386_int_call_1_c,
2703 "mov %edx,%ebx\n\t"
2704 "lea 0x8(%esp),%esp");
2705}
2706
2707/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2708
2709static void
2710i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2711{
2712 unsigned char buf[16];
2713 int i;
2714 CORE_ADDR buildaddr;
2715
2716 EMIT_ASM32 (i386_void_call_2_a,
2717 /* Preserve %eax only; we don't have to worry about %ebx. */
2718 "push %eax\n\t"
2719 /* Reserve a bit of stack space for arguments. */
2720 "sub $0x10,%esp\n\t"
2721 /* Copy "top" to the second argument position. (Note that
2722 we can't assume function won't scribble on its
2723 arguments, so don't try to restore from this.) */
2724 "mov %eax,4(%esp)\n\t"
2725 "mov %ebx,8(%esp)");
2726 /* Put the first argument on the stack. */
2727 buildaddr = current_insn_ptr;
2728 i = 0;
2729 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2730 buf[i++] = 0x04;
2731 buf[i++] = 0x24;
2732  memcpy (&buf[i], &arg1, sizeof (arg1));
2733 i += 4;
2734 append_insns (&buildaddr, i, buf);
2735 current_insn_ptr = buildaddr;
2736 i386_emit_call (fn);
2737 EMIT_ASM32 (i386_void_call_2_b,
2738 "lea 0x10(%esp),%esp\n\t"
2739 /* Restore original stack top. */
2740 "pop %eax");
2741}
2742
2743
2744static void
2745i386_emit_eq_goto (int *offset_p, int *size_p)
2746{
2747 EMIT_ASM32 (eq,
2748 /* Check low half first, more likely to be decider */
2749 "cmpl %eax,(%esp)\n\t"
2750 "jne .Leq_fallthru\n\t"
2751 "cmpl %ebx,4(%esp)\n\t"
2752 "jne .Leq_fallthru\n\t"
2753 "lea 0x8(%esp),%esp\n\t"
2754 "pop %eax\n\t"
2755 "pop %ebx\n\t"
2756 /* jmp, but don't trust the assembler to choose the right jump */
2757 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2758 ".Leq_fallthru:\n\t"
2759 "lea 0x8(%esp),%esp\n\t"
2760 "pop %eax\n\t"
2761 "pop %ebx");
2762
2763 if (offset_p)
2764 *offset_p = 18;
2765 if (size_p)
2766 *size_p = 4;
2767}
2768
2769static void
2770i386_emit_ne_goto (int *offset_p, int *size_p)
2771{
2772 EMIT_ASM32 (ne,
2773 /* Check low half first, more likely to be decider */
2774 "cmpl %eax,(%esp)\n\t"
2775 "jne .Lne_jump\n\t"
2776 "cmpl %ebx,4(%esp)\n\t"
2777 "je .Lne_fallthru\n\t"
2778 ".Lne_jump:\n\t"
2779 "lea 0x8(%esp),%esp\n\t"
2780 "pop %eax\n\t"
2781 "pop %ebx\n\t"
2782 /* jmp, but don't trust the assembler to choose the right jump */
2783 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2784 ".Lne_fallthru:\n\t"
2785 "lea 0x8(%esp),%esp\n\t"
2786 "pop %eax\n\t"
2787 "pop %ebx");
2788
2789 if (offset_p)
2790 *offset_p = 18;
2791 if (size_p)
2792 *size_p = 4;
2793}
2794
2795static void
2796i386_emit_lt_goto (int *offset_p, int *size_p)
2797{
2798 EMIT_ASM32 (lt,
2799 "cmpl %ebx,4(%esp)\n\t"
2800 "jl .Llt_jump\n\t"
2801 "jne .Llt_fallthru\n\t"
2802 "cmpl %eax,(%esp)\n\t"
2803 "jnl .Llt_fallthru\n\t"
2804 ".Llt_jump:\n\t"
2805 "lea 0x8(%esp),%esp\n\t"
2806 "pop %eax\n\t"
2807 "pop %ebx\n\t"
2808 /* jmp, but don't trust the assembler to choose the right jump */
2809 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2810 ".Llt_fallthru:\n\t"
2811 "lea 0x8(%esp),%esp\n\t"
2812 "pop %eax\n\t"
2813 "pop %ebx");
2814
2815 if (offset_p)
2816 *offset_p = 20;
2817 if (size_p)
2818 *size_p = 4;
2819}
2820
2821static void
2822i386_emit_le_goto (int *offset_p, int *size_p)
2823{
2824 EMIT_ASM32 (le,
2825 "cmpl %ebx,4(%esp)\n\t"
2826 "jle .Lle_jump\n\t"
2827 "jne .Lle_fallthru\n\t"
2828 "cmpl %eax,(%esp)\n\t"
2829 "jnle .Lle_fallthru\n\t"
2830 ".Lle_jump:\n\t"
2831 "lea 0x8(%esp),%esp\n\t"
2832 "pop %eax\n\t"
2833 "pop %ebx\n\t"
2834 /* jmp, but don't trust the assembler to choose the right jump */
2835 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2836 ".Lle_fallthru:\n\t"
2837 "lea 0x8(%esp),%esp\n\t"
2838 "pop %eax\n\t"
2839 "pop %ebx");
2840
2841 if (offset_p)
2842 *offset_p = 20;
2843 if (size_p)
2844 *size_p = 4;
2845}
2846
2847static void
2848i386_emit_gt_goto (int *offset_p, int *size_p)
2849{
2850 EMIT_ASM32 (gt,
2851 "cmpl %ebx,4(%esp)\n\t"
2852 "jg .Lgt_jump\n\t"
2853 "jne .Lgt_fallthru\n\t"
2854 "cmpl %eax,(%esp)\n\t"
2855 "jng .Lgt_fallthru\n\t"
2856 ".Lgt_jump:\n\t"
2857 "lea 0x8(%esp),%esp\n\t"
2858 "pop %eax\n\t"
2859 "pop %ebx\n\t"
2860 /* jmp, but don't trust the assembler to choose the right jump */
2861 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2862 ".Lgt_fallthru:\n\t"
2863 "lea 0x8(%esp),%esp\n\t"
2864 "pop %eax\n\t"
2865 "pop %ebx");
2866
2867 if (offset_p)
2868 *offset_p = 20;
2869 if (size_p)
2870 *size_p = 4;
2871}
2872
2873static void
2874i386_emit_ge_goto (int *offset_p, int *size_p)
2875{
2876 EMIT_ASM32 (ge,
2877 "cmpl %ebx,4(%esp)\n\t"
2878 "jge .Lge_jump\n\t"
2879 "jne .Lge_fallthru\n\t"
2880 "cmpl %eax,(%esp)\n\t"
2881 "jnge .Lge_fallthru\n\t"
2882 ".Lge_jump:\n\t"
2883 "lea 0x8(%esp),%esp\n\t"
2884 "pop %eax\n\t"
2885 "pop %ebx\n\t"
2886 /* jmp, but don't trust the assembler to choose the right jump */
2887 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2888 ".Lge_fallthru:\n\t"
2889 "lea 0x8(%esp),%esp\n\t"
2890 "pop %eax\n\t"
2891 "pop %ebx");
2892
2893 if (offset_p)
2894 *offset_p = 20;
2895 if (size_p)
2896 *size_p = 4;
2897}
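/* The reported offsets in this family follow the same byte counting
   as before: eq/ne come to 18 (17 code bytes before the jump plus the
   0xe9 opcode), while lt/le/gt/ge come to 20 because they need three
   conditional jumps (two on the high word, one on the low) where
   eq/ne need only two.  */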
2898
2899struct emit_ops i386_emit_ops =
2900 {
2901 i386_emit_prologue,
2902 i386_emit_epilogue,
2903 i386_emit_add,
2904 i386_emit_sub,
2905 i386_emit_mul,
2906 i386_emit_lsh,
2907 i386_emit_rsh_signed,
2908 i386_emit_rsh_unsigned,
2909 i386_emit_ext,
2910 i386_emit_log_not,
2911 i386_emit_bit_and,
2912 i386_emit_bit_or,
2913 i386_emit_bit_xor,
2914 i386_emit_bit_not,
2915 i386_emit_equal,
2916 i386_emit_less_signed,
2917 i386_emit_less_unsigned,
2918 i386_emit_ref,
2919 i386_emit_if_goto,
2920 i386_emit_goto,
2921 i386_write_goto_address,
2922 i386_emit_const,
2923 i386_emit_call,
2924 i386_emit_reg,
2925 i386_emit_pop,
2926 i386_emit_stack_flush,
2927 i386_emit_zero_ext,
2928 i386_emit_swap,
2929 i386_emit_stack_adjust,
2930 i386_emit_int_call_1,
2931 i386_emit_void_call_2,
2932 i386_emit_eq_goto,
2933 i386_emit_ne_goto,
2934 i386_emit_lt_goto,
2935 i386_emit_le_goto,
2936 i386_emit_gt_goto,
2937 i386_emit_ge_goto
2938 };
2939
2940
2941emit_ops *
2942x86_target::emit_ops ()
2943{
2944#ifdef __x86_64__
2945  if (is_64bit_tdesc ())
2946 return &amd64_emit_ops;
2947 else
2948#endif
2949 return &i386_emit_ops;
2950}
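/* A hypothetical use of this hook (member names assumed from the
   emit_ops initializers above; the driver shown is a sketch, not code
   from this file): compiling the agent expression "1 + 2" could run

     struct emit_ops *ops = the_target->emit_ops ();
     ops->emit_prologue ();     // frame + reg/value pointer setup
     ops->emit_const (1);       // constant into the top-of-stack reg
     ops->emit_stack_flush ();  // spill it to the in-memory stack
     ops->emit_const (2);
     ops->emit_add ();          // fold the spilled slot into the top
     ops->emit_epilogue ();     // store the result, return

   with each hook appending instructions at current_insn_ptr.  */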
2951
2952/* Implementation of target ops method "sw_breakpoint_from_kind". */
2953
2954const gdb_byte *
2955x86_target::sw_breakpoint_from_kind (int kind, int *size)
2956{
2957 *size = x86_breakpoint_len;
2958 return x86_breakpoint;
2959}
2960
2961bool
2962x86_target::low_supports_range_stepping ()
2963{
2964  return true;
2965}
2966
2967static int
2968x86_get_ipa_tdesc_idx (void)
2969{
2970 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2971 const struct target_desc *tdesc = regcache->tdesc;
2972
2973#ifdef __x86_64__
2974  return amd64_get_ipa_tdesc_idx (tdesc);
2975#endif
2976
2977  if (tdesc == tdesc_i386_linux_no_xml)
2978    return X86_TDESC_SSE;
2979
2980  return i386_get_ipa_tdesc_idx (tdesc);
2981}
2982
2983/* This is initialized assuming an amd64 target.
2984 x86_arch_setup will correct it for i386 or amd64 targets. */
2985
2986struct linux_target_ops the_low_target =
2987{
2988  x86_get_syscall_trapinfo,
2989  x86_get_ipa_tdesc_idx,
2990};
2991
2992/* The linux target ops object. */
2993
2994linux_process_target *the_linux_target = &the_x86_target;
2995
2996void
2997initialize_low_arch (void)
2998{
2999 /* Initialize the Linux target descriptions. */
3000#ifdef __x86_64__
3001  tdesc_amd64_linux_no_xml = allocate_target_description ();
3002 copy_target_description (tdesc_amd64_linux_no_xml,
3003 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
3004 false));
3005 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3006#endif
3007
3008  tdesc_i386_linux_no_xml = allocate_target_description ();
3009 copy_target_description (tdesc_i386_linux_no_xml,
3010 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3011 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3012
3013 initialize_regsets_info (&x86_regsets_info);
3014}