gdbserver/linux-low: turn 'get_syscall_trapinfo' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
50#ifdef __x86_64__
51static struct target_desc *tdesc_amd64_linux_no_xml;
52#endif
53static struct target_desc *tdesc_i386_linux_no_xml;
54
1570b33e 55
fa593d66 56static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 57static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
59/* Backward compatibility for gdb without XML support. */
60
61static const char *xmltarget_i386_linux_no_xml = "@<target>\
62<architecture>i386</architecture>\
63<osabi>GNU/Linux</osabi>\
64</target>";
f6d1620c
L
65
66#ifdef __x86_64__
1570b33e
L
67static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68<architecture>i386:x86-64</architecture>\
69<osabi>GNU/Linux</osabi>\
70</target>";
f6d1620c 71#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
77#ifndef PTRACE_GET_THREAD_AREA
78#define PTRACE_GET_THREAD_AREA 25
79#endif
80
81/* This definition comes from prctl.h, but some kernels may not have it. */
82#ifndef PTRACE_ARCH_PRCTL
83#define PTRACE_ARCH_PRCTL 30
84#endif
85
86/* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88#ifndef ARCH_GET_FS
89#define ARCH_SET_GS 0x1001
90#define ARCH_SET_FS 0x1002
91#define ARCH_GET_FS 0x1003
92#define ARCH_GET_GS 0x1004
93#endif
94
ef0478f6
TBA
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();
};
189
190/* The singleton target ops object. */
191
192static x86_target the_x86_target;
193
aa5ca48f
DE
194/* Per-process arch-specific data we want to keep. */
195
196struct arch_process_info
197{
df7e5265 198 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
199};
200
d0722149
DE
201#ifdef __x86_64__
202
203/* Mapping between the general-purpose registers in `struct user'
204 format and GDB's register array layout.
205 Note that the transfer layout uses 64-bit regs. */
206static /*const*/ int i386_regmap[] =
207{
208 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
209 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
210 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
211 DS * 8, ES * 8, FS * 8, GS * 8
212};
213
214#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
215
216/* So code below doesn't have to care, i386 or amd64. */
217#define ORIG_EAX ORIG_RAX
bc9540e8 218#define REGSIZE 8
d0722149
DE
219
220static const int x86_64_regmap[] =
221{
222 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
223 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
224 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
225 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
226 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
227 DS * 8, ES * 8, FS * 8, GS * 8,
228 -1, -1, -1, -1, -1, -1, -1, -1,
229 -1, -1, -1, -1, -1, -1, -1, -1,
230 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
231 -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 ORIG_RAX * 8,
2735833d
WT
234#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
235 21 * 8, 22 * 8,
236#else
237 -1, -1,
238#endif
a196ebeb 239 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
240 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
241 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1,
243 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1,
245 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
246 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
249 -1, -1, -1, -1, -1, -1, -1, -1,
250 -1 /* pkru */
d0722149
DE
251};
252
253#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 254#define X86_64_USER_REGS (GS + 1)
d0722149
DE
255
256#else /* ! __x86_64__ */
257
258/* Mapping between the general-purpose registers in `struct user'
259 format and GDB's register array layout. */
260static /*const*/ int i386_regmap[] =
261{
262 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
263 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
264 EIP * 4, EFL * 4, CS * 4, SS * 4,
265 DS * 4, ES * 4, FS * 4, GS * 4
266};
267
268#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
269
bc9540e8
PA
270#define REGSIZE 4
271
d0722149 272#endif
3aee8918
PA
273
274#ifdef __x86_64__
275
276/* Returns true if the current inferior belongs to a x86-64 process,
277 per the tdesc. */
278
279static int
280is_64bit_tdesc (void)
281{
0bfdf32f 282 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
283
284 return register_size (regcache->tdesc, 0) == 8;
285}
286
287#endif
288
d0722149
DE
289\f
/* Called by libthread_db.  Look up the base address of the thread
   area (TLS) for thread LWPID; IDX selects which segment.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferiors keep the thread pointer in FS (or GS); fetch
	 it directly with PTRACE_ARCH_PRCTL.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit path: read the GDT descriptor for entry IDX.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
330
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: the FS base is the per-thread address.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    /* 32-bit inferior: derive the GDT index from the GS selector and
       read the corresponding thread-area descriptor.  */
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the segment base word of the descriptor — see the
       user_desc layout in <asm/ldt.h>.  */
    *addr = desc[1];
    return 0;
  }
}
377
378
d0722149 379\f
daca57a7
TBA
380bool
381x86_target::low_cannot_store_register (int regno)
d0722149 382{
3aee8918
PA
383#ifdef __x86_64__
384 if (is_64bit_tdesc ())
daca57a7 385 return false;
3aee8918
PA
386#endif
387
d0722149
DE
388 return regno >= I386_NUM_REGS;
389}
390
daca57a7
TBA
391bool
392x86_target::low_cannot_fetch_register (int regno)
d0722149 393{
3aee8918
PA
394#ifdef __x86_64__
395 if (is_64bit_tdesc ())
daca57a7 396 return false;
3aee8918
PA
397#endif
398
d0722149
DE
399 return regno >= I386_NUM_REGS;
400}
401
/* Fill BUF (a ptrace general-register buffer) from REGCACHE.  Handles
   native amd64 inferiors and 32-bit inferiors under a 64-bit
   gdbserver.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: copy every register with a slot in the
	 ptrace layout.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* No fs_base/gs_base slots in struct user_regs_struct on this
	   host; write them back via PTRACE_ARCH_PRCTL instead.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
456
/* Supply the general registers in BUF (ptrace layout) to REGCACHE.
   Inverse of x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are not in the buffer on this host; fetch
	   them with PTRACE_ARCH_PRCTL.  Failures are silently skipped
	   (the register is left as-is in the cache).  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
491
/* Fill BUF from the FP registers in REGCACHE: FXSAVE format on amd64,
   FSAVE format on i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
501
/* Supply the FP registers in BUF to REGCACHE: FXSAVE format on amd64,
   FSAVE format on i386.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
511
#ifndef __x86_64__

/* Fill BUF from REGCACHE in FXSAVE format.  i386-only: on amd64 the
   FXSAVE layout is handled by the fpregset functions above.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Supply the FXSAVE-format buffer BUF to REGCACHE (i386 only).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
527
1570b33e
L
/* Fill BUF from REGCACHE in XSAVE format.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Supply the XSAVE-format buffer BUF to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
539
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* Size 0 here; fixed up at runtime once XCR0 is known (see
     x86_linux_read_description).  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
568
bf9ae9d8
TBA
569bool
570x86_target::low_supports_breakpoints ()
571{
572 return true;
573}
574
575CORE_ADDR
576x86_target::low_get_pc (regcache *regcache)
d0722149 577{
3aee8918 578 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
579
580 if (use_64bit)
581 {
6598661d
PA
582 uint64_t pc;
583
442ea881 584 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
585 return (CORE_ADDR) pc;
586 }
587 else
588 {
6598661d
PA
589 uint32_t pc;
590
442ea881 591 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
592 return (CORE_ADDR) pc;
593 }
594}
595
bf9ae9d8
TBA
596void
597x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 598{
3aee8918 599 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
600
601 if (use_64bit)
602 {
6598661d
PA
603 uint64_t newpc = pc;
604
442ea881 605 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
606 }
607 else
608 {
6598661d
PA
609 uint32_t newpc = pc;
610
442ea881 611 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
612 }
613}
d4807ea2
TBA
614
615int
616x86_target::low_decr_pc_after_break ()
617{
618 return 1;
619}
620
d0722149 621\f
dd373349 622static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
623#define x86_breakpoint_len 1
624
d7146cda
TBA
625bool
626x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
627{
628 unsigned char c;
629
d7146cda 630 read_memory (pc, &c, 1);
d0722149 631 if (c == 0xCC)
d7146cda 632 return true;
d0722149 633
d7146cda 634 return false;
d0722149
DE
635}
636\f
42995dbd 637/* Low-level function vector. */
df7e5265 638struct x86_dr_low_type x86_dr_low =
42995dbd 639 {
d33472ad
GB
640 x86_linux_dr_set_control,
641 x86_linux_dr_set_addr,
642 x86_linux_dr_get_addr,
643 x86_linux_dr_get_status,
644 x86_linux_dr_get_control,
42995dbd
GB
645 sizeof (void *),
646 };
aa5ca48f 647\f
90d74c30 648/* Breakpoint/Watchpoint support. */
aa5ca48f 649
007c9b97
TBA
650bool
651x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
652{
653 switch (z_type)
654 {
655 case Z_PACKET_SW_BP:
656 case Z_PACKET_HW_BP:
657 case Z_PACKET_WRITE_WP:
658 case Z_PACKET_ACCESS_WP:
007c9b97 659 return true;
802e8e6d 660 default:
007c9b97 661 return false;
802e8e6d
PA
662 }
663}
664
9db9aa23
TBA
665int
666x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
667 int size, raw_breakpoint *bp)
aa5ca48f
DE
668{
669 struct process_info *proc = current_process ();
802e8e6d 670
aa5ca48f
DE
671 switch (type)
672 {
802e8e6d
PA
673 case raw_bkpt_type_hw:
674 case raw_bkpt_type_write_wp:
675 case raw_bkpt_type_access_wp:
a4165e94 676 {
802e8e6d
PA
677 enum target_hw_bp_type hw_type
678 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 679 struct x86_debug_reg_state *state
fe978cb0 680 = &proc->priv->arch_private->debug_reg_state;
a4165e94 681
df7e5265 682 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 683 }
961bd387 684
aa5ca48f
DE
685 default:
686 /* Unsupported. */
687 return 1;
688 }
689}
690
9db9aa23
TBA
691int
692x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
693 int size, raw_breakpoint *bp)
aa5ca48f
DE
694{
695 struct process_info *proc = current_process ();
802e8e6d 696
aa5ca48f
DE
697 switch (type)
698 {
802e8e6d
PA
699 case raw_bkpt_type_hw:
700 case raw_bkpt_type_write_wp:
701 case raw_bkpt_type_access_wp:
a4165e94 702 {
802e8e6d
PA
703 enum target_hw_bp_type hw_type
704 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 705 struct x86_debug_reg_state *state
fe978cb0 706 = &proc->priv->arch_private->debug_reg_state;
a4165e94 707
df7e5265 708 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 709 }
aa5ca48f
DE
710 default:
711 /* Unsupported. */
712 return 1;
713 }
714}
715
ac1bbaca
TBA
716bool
717x86_target::low_stopped_by_watchpoint ()
aa5ca48f
DE
718{
719 struct process_info *proc = current_process ();
fe978cb0 720 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
721}
722
ac1bbaca
TBA
723CORE_ADDR
724x86_target::low_stopped_data_address ()
aa5ca48f
DE
725{
726 struct process_info *proc = current_process ();
727 CORE_ADDR addr;
fe978cb0 728 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 729 &addr))
aa5ca48f
DE
730 return addr;
731 return 0;
732}
733\f
734/* Called when a new process is created. */
735
fd000fb3
TBA
736arch_process_info *
737x86_target::low_new_process ()
aa5ca48f 738{
ed859da7 739 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 740
df7e5265 741 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
742
743 return info;
744}
745
04ec7890
SM
746/* Called when a process is being deleted. */
747
fd000fb3
TBA
748void
749x86_target::low_delete_process (arch_process_info *info)
04ec7890
SM
750{
751 xfree (info);
752}
753
fd000fb3
TBA
754void
755x86_target::low_new_thread (lwp_info *lwp)
756{
757 /* This comes from nat/. */
758 x86_linux_new_thread (lwp);
759}
3a8a0396 760
fd000fb3
TBA
761void
762x86_target::low_delete_thread (arch_lwp_info *alwp)
763{
764 /* This comes from nat/. */
765 x86_linux_delete_thread (alwp);
766}
767
/* Target routine for new_fork.  Copy PARENT's debug-register mirror
   into CHILD.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
795
d7599cc0
TBA
796void
797x86_target::low_prepare_to_resume (lwp_info *lwp)
798{
799 /* This comes from nat/. */
800 x86_linux_prepare_to_resume (lwp);
801}
802
70a0bb6b
GB
803/* See nat/x86-dregs.h. */
804
805struct x86_debug_reg_state *
806x86_debug_reg_state (pid_t pid)
807{
808 struct process_info *proc = find_process_pid (pid);
809
810 return &proc->priv->arch_private->debug_reg_state;
811}
aa5ca48f 812\f
d0722149
DE
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
845\f
1570b33e
L
846static int use_xml;
847
3aee8918
PA
848/* Format of XSAVE extended state is:
849 struct
850 {
851 fxsave_bytes[0..463]
852 sw_usable_bytes[464..511]
853 xstate_hdr_bytes[512..575]
854 avx_bytes[576..831]
855 future_state etc
856 };
857
858 Same memory layout will be used for the coredump NT_X86_XSTATE
859 representing the XSAVE extended state registers.
860
861 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
862 extended state mask, which is the same as the extended control register
863 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
864 together with the mask saved in the xstate_hdr_bytes to determine what
865 states the processor/OS supports and what state, used or initialized,
866 the process/thread is in. */
867#define I386_LINUX_XSAVE_XCR0_OFFSET 464
868
869/* Does the current host support the GETFPXREGS request? The header
870 file may or may not define it, and even if it is defined, the
871 kernel will return EIO if it's running on a pre-SSE processor. */
872int have_ptrace_getfpxregs =
873#ifdef HAVE_PTRACE_GETFPXREGS
874 -1
875#else
876 0
877#endif
878;
1570b33e 879
3aee8918
PA
/* Get Linux/x86 target description from running target.

   Probes the inferior (via ptrace) for its word size, FPX support and
   XSAVE/XCR0 features, caches the results in file-level state
   (have_ptrace_getfpxregs, have_ptrace_getregset, x86_xcr0), resizes
   the XSTATE regset to match XCR0, and returns the matching tdesc.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* First time through: probe whether PTRACE_GETFPXREGS actually
     works (pre-SSE kernels/processors return EIO).  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* First time through: probe whether PTRACE_GETREGSET works, and if
     so read XCR0 out of the XSAVE area.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1003
3aee8918
PA
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    /* Re-select the tdesc for this process under the new use_xml
       setting.  */
    low_arch_setup ();
  });

  current_thread = saved_thread;
}
1028
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  /* Duplicate the value list so strtok_r can modify it.  */
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  /* Refresh every process's tdesc to match the new use_xml state.  */
  update_xmltarget ();
}
1066
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* Register layout for 64-bit inferiors; regset-based only (the
   usrregs_info slot is NULL).  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

/* User-area register map used as fallback for 32-bit inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

/* Register layout for 32-bit inferiors: regsets plus the usrregs
   fallback above.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1096
/* Return the register layout matching the current inferior: the
   amd64 layout when the tdesc is 64-bit, the i386 layout otherwise.  */

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
d0722149 1107
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1116
/* Syscall catchpoints are supported on x86 GNU/Linux.  */

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}
1122
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* Register 0 being 8 bytes wide means we are looking at a 64-bit
     (amd64) target description.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* orig_rax is 8 bytes wide, so collect into a long and narrow
	 to int afterwards.  */
      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1141
/* Tracepoints are supported on x86 GNU/Linux.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
1147
/* Write the LEN bytes at BUF into target memory at *TO, then advance
   *TO past the bytes written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1154
/* Parse OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 89 e5"), storing each byte into BUF.  Return the number
   of bytes written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *out = buf;

  for (;;)
    {
      char *endptr;
      unsigned long byte = strtoul (op, &endptr, 16);

      /* strtoul leaves ENDPTR == OP when no digits were consumed;
	 that ends the scan.  */
      if (endptr == op)
	break;

      *out++ = byte;
      op = endptr;
    }

  return out - buf;
}
1174
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if a required jump displacement does not fit in 32 bits.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  /* NOTE(review): jump_insn is presumably the 5-byte e9 rel32 jump
     template defined earlier in this file — confirm.  */
  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1365
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) on failure.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  /* NOTE(review): sizeof (jump_insn) is used here as the length of
     the e8 rel32 call — presumably both are 5 bytes; confirm.  */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1553
/* Fast tracepoints are supported on x86 GNU/Linux.  */

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}
1559
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 builder based on the current target description.  */

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1598
1599/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1600 architectures. */
1601
809a0c35
TBA
1602int
1603x86_target::get_min_fast_tracepoint_insn_len ()
405f8e94
SS
1604{
1605 static int warned_about_fast_tracepoints = 0;
1606
1607#ifdef __x86_64__
1608 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1609 used for fast tracepoints. */
3aee8918 1610 if (is_64bit_tdesc ())
405f8e94
SS
1611 return 5;
1612#endif
1613
58b4daa5 1614 if (agent_loaded_p ())
405f8e94
SS
1615 {
1616 char errbuf[IPA_BUFSIZ];
1617
1618 errbuf[0] = '\0';
1619
1620 /* On x86, if trampolines are available, then 4-byte jump instructions
1621 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1622 with a 4-byte offset are used instead. */
1623 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1624 return 4;
1625 else
1626 {
1627 /* GDB has no channel to explain to user why a shorter fast
1628 tracepoint is not possible, but at least make GDBserver
1629 mention that something has gone awry. */
1630 if (!warned_about_fast_tracepoints)
1631 {
422186a9 1632 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1633 warned_about_fast_tracepoints = 1;
1634 }
1635 return 5;
1636 }
1637 }
1638 else
1639 {
1640 /* Indicate that the minimum length is currently unknown since the IPA
1641 has not loaded yet. */
1642 return 0;
1643 }
fa593d66
PA
1644}
1645
6a271cae
PA
1646static void
1647add_insns (unsigned char *start, int len)
1648{
1649 CORE_ADDR buildaddr = current_insn_ptr;
1650
1651 if (debug_threads)
87ce2a04
DE
1652 debug_printf ("Adding %d bytes of insn at %s\n",
1653 len, paddress (buildaddr));
6a271cae
PA
1654
1655 append_insns (&buildaddr, len, start);
1656 current_insn_ptr = buildaddr;
1657}
1658
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the machine code between the start_/end_ labels of the inline
   asm into the compiled-bytecode buffer via add_insns.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble the insns as 32-bit code even in a
   64-bit gdbserver (.code32/.code64 switches).  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1696
#ifdef __x86_64__

/* Emit the amd64 bytecode-compiler prologue: set up a frame and
   spill the incoming %rdi/%rsi arguments.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store %rax through the saved second argument
   pointer, zero %rax, and return.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the stack top and add it into %rax.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract %rax from the stack top, pop result into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply is not implemented; flag emit_error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag emit_error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Signed right shift is not implemented; flag emit_error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Unsigned right shift is not implemented; flag emit_error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit a sign-extension of %rax from ARG bits (8, 16 or 32).  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1786
/* Emit logical-not of %rax (1 if zero, 0 otherwise).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit bitwise AND of the stack top into %rax, popping it.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise OR of the stack top into %rax, popping it.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise XOR of the stack top into %rax, popping it.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise complement of %rax.  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit: %rax = (stack top == %rax), popping the stack top.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (stack top < %rax), signed, popping the stack top.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (stack top < %rax), unsigned, popping the stack top.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a SIZE-byte load through the pointer in %rax into %rax.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1892
/* Emit a conditional jump taken when %rax (popped condition) is
   non-zero.  The jne rel32 is emitted as raw bytes so its offset can
   be patched later; report the offset position and size through
   *OFFSET_P/*SIZE_P.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp rel32 with a to-be-patched offset.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the 4-byte relative displacement at FROM so the jump lands
   at TO.  SIZE must be 4; otherwise flag emit_error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
1933
/* Emit code loading the 64-bit constant NUM into %rax (movabs).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN, using call rel32 when FN is within 31 bits of
   the pad, otherwise an absolute movabs + callq *%r10.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1990
/* Emit code that fetches raw register REG: load the register number
   into %esi and call the get-raw-reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit: pop the value stack top into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: push %rax onto the value stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit a zero-extension of %rax from ARG bits (8, 16 or 32).  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap %rax with the stack top.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code dropping N entries (8 bytes each) from the stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2072
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit: load ARG1 into %edi
   and call FN; its result is left in %rax.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit: call FN with
   ARG1 in %edi and the current stack top (in %rax) as the second
   argument, preserving the stack top across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2118
/* Emit a compare-and-branch taken when the stack top equals %rax;
   both operands are popped either way.  The jmp rel32 is emitted as
   raw bytes so its target can be patched; *OFFSET_P/*SIZE_P report
   where the displacement lives.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* As amd64_emit_eq_goto, but taken when the operands differ.  */

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2158
/* Emit a signed "less than, branch" comparison for 64-bit inferiors:
   branch when the next-to-top entry is less than the top (%rax).  See
   amd64_emit_eq_goto for the OFFSET_P/SIZE_P contract.  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2178
/* Emit a signed "less or equal, branch" comparison for 64-bit
   inferiors.  See amd64_emit_eq_goto for the OFFSET_P/SIZE_P
   contract.  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2198
/* Emit a signed "greater than, branch" comparison for 64-bit
   inferiors.  See amd64_emit_eq_goto for the OFFSET_P/SIZE_P
   contract.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2218
df4a0200 2219static void
6b9801d4
SS
2220amd64_emit_ge_goto (int *offset_p, int *size_p)
2221{
2222 EMIT_ASM (amd64_ge,
2223 "cmp %rax,(%rsp)\n\t"
2224 "jnge .Lamd64_ge_fallthru\n\t"
2225 ".Lamd64_ge_jump:\n\t"
2226 "lea 0x8(%rsp),%rsp\n\t"
2227 "pop %rax\n\t"
2228 /* jmp, but don't trust the assembler to choose the right jump */
2229 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2230 ".Lamd64_ge_fallthru:\n\t"
2231 "lea 0x8(%rsp),%rsp\n\t"
2232 "pop %rax");
2233
2234 if (offset_p)
2235 *offset_p = 13;
2236 if (size_p)
2237 *size_p = 4;
2238}
2239
/* Bytecode-compilation callback table for 64-bit inferiors.  This is a
   positional aggregate initializer: the entry order must match the
   member order of struct emit_ops (declared elsewhere).  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2280
2281#endif /* __x86_64__ */
2282
/* Emit the i386 compiled-expression prologue: set up a frame and save
   %ebx (used throughout as the high half of the 64-bit top-of-stack
   pair %ebx:%eax).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2293
/* Emit the i386 epilogue: store the 64-bit result pair %ebx:%eax
   through the value pointer at 12(%ebp), return 0 in %eax, and undo
   the prologue.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2306
/* Emit 64-bit addition: add the stack pair at (%esp)/4(%esp) into
   %ebx:%eax with carry propagation, then drop the consumed slot.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2315
/* Emit 64-bit subtraction: subtract top (%ebx:%eax) from the entry
   below it in memory (with borrow), then pop the result into
   %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2325
/* 64-bit multiply is not implemented for i386 compiled expressions;
   flag emit_error so the caller abandons compilation of this
   expression.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2331
/* 64-bit left shift is not implemented for i386; flag emit_error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2337
/* 64-bit arithmetic right shift is not implemented for i386; flag
   emit_error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2343
/* 64-bit logical right shift is not implemented for i386; flag
   emit_error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2349
/* Emit sign-extension of the top-of-stack value from ARG bits (8, 16
   or 32) to the full 64-bit %ebx:%eax pair; %ebx is filled with the
   sign via an arithmetic shift.  Other widths flag emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2377
/* Emit logical NOT: replace the 64-bit top-of-stack %ebx:%eax with 1
   if it was zero, 0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2388
/* Emit 64-bit bitwise AND of %ebx:%eax with the stack entry below,
   dropping the consumed slot.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2397
/* Emit 64-bit bitwise OR of %ebx:%eax with the stack entry below,
   dropping the consumed slot.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2406
/* Emit 64-bit bitwise XOR of %ebx:%eax with the stack entry below,
   dropping the consumed slot.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2415
/* Emit 64-bit bitwise NOT of the top-of-stack pair %ebx:%eax.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2423
/* Emit 64-bit equality test: compare %ebx:%eax against the stack
   entry below it (high halves first) and leave 1/0 in %ebx:%eax,
   dropping the consumed slot.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2441
/* Emit signed 64-bit "less than": test whether the stack entry below
   is less than %ebx:%eax, comparing high halves first and low halves
   only on a tie; leave 1/0 in %ebx:%eax.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2460
/* Emit unsigned 64-bit "less than"; same structure as the signed
   variant but using unsigned condition codes (jb).  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2479
/* Emit a memory dereference of SIZE bytes (1/2/4/8) through the
   address in %eax, leaving the value in %eax (and %ebx for the high
   word of an 8-byte load).  Unsupported sizes fall through silently.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2504
/* Emit a conditional branch: pop the 64-bit top of stack and branch
   (via a hand-encoded jne rel32) if it is non-zero.  *OFFSET_P/*SIZE_P
   report where the 4-byte displacement placeholder lives.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2522
/* Emit an unconditional branch (hand-encoded jmp rel32).  The 4-byte
   displacement placeholder starts 1 byte in, as reported through
   *OFFSET_P/*SIZE_P.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2534
2535static void
2536i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2537{
2538 int diff = (to - (from + size));
2539 unsigned char buf[sizeof (int)];
2540
2541 /* We're only doing 4-byte sizes at the moment. */
2542 if (size != 4)
2543 {
2544 emit_error = 1;
2545 return;
2546 }
2547
2548 memcpy (buf, &diff, sizeof (int));
4196ab2a 2549 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2550}
2551
2552static void
4e29fb54 2553i386_emit_const (LONGEST num)
6a271cae
PA
2554{
2555 unsigned char buf[16];
b00ad6ff 2556 int i, hi, lo;
6a271cae
PA
2557 CORE_ADDR buildaddr = current_insn_ptr;
2558
2559 i = 0;
2560 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2561 lo = num & 0xffffffff;
2562 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2563 i += 4;
2564 hi = ((num >> 32) & 0xffffffff);
2565 if (hi)
2566 {
2567 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2568 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2569 i += 4;
2570 }
2571 else
2572 {
2573 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2574 }
2575 append_insns (&buildaddr, i, buf);
2576 current_insn_ptr = buildaddr;
2577}
2578
2579static void
2580i386_emit_call (CORE_ADDR fn)
2581{
2582 unsigned char buf[16];
2583 int i, offset;
2584 CORE_ADDR buildaddr;
2585
2586 buildaddr = current_insn_ptr;
2587 i = 0;
2588 buf[i++] = 0xe8; /* call <reladdr> */
2589 offset = ((int) fn) - (buildaddr + 5);
2590 memcpy (buf + 1, &offset, 4);
2591 append_insns (&buildaddr, 5, buf);
2592 current_insn_ptr = buildaddr;
2593}
2594
/* Emit code pushing the current top of stack and loading raw register
   REG: the register number and the raw-regs base (8(%ebp), set up by
   the prologue) are passed to the get_raw_reg helper, whose result
   lands in %eax with %ebx cleared.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2620
/* Emit a stack pop: load the next 64-bit entry into %ebx:%eax.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2628
/* Emit code spilling the cached top of stack (%ebx:%eax) onto the
   memory stack, high word first.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2636
/* Emit zero-extension of the top-of-stack value from ARG bits (8, 16
   or 32) to the full 64-bit pair; %ebx is always cleared.  Other
   widths flag emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2660
/* Emit code exchanging the top two 64-bit stack entries (register
   pair vs. the two memory slots below it).  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2672
2673static void
2674i386_emit_stack_adjust (int n)
2675{
2676 unsigned char buf[16];
2677 int i;
2678 CORE_ADDR buildaddr = current_insn_ptr;
2679
2680 i = 0;
2681 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2682 buf[i++] = 0x64;
2683 buf[i++] = 0x24;
2684 buf[i++] = n * 8;
2685 append_insns (&buildaddr, i, buf);
2686 current_insn_ptr = buildaddr;
2687}
2688
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call to FN passing
   the constant ARG1 on the stack; the 64-bit result comes back in
   %edx:%eax and is moved into the top-of-stack pair %ebx:%eax.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2716
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call to FN
   passing the constant ARG1 and the current 64-bit top of stack as
   arguments; %eax is preserved across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
2752
/* Emit a 64-bit "equal, branch" comparison for 32-bit inferiors:
   branch when the top two entries are equal, checking low halves
   first.  *OFFSET_P/*SIZE_P report where the jmp's 4-byte displacement
   placeholder lives.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2778
/* Emit a 64-bit "not equal, branch" comparison for 32-bit inferiors.
   See i386_emit_eq_goto for the OFFSET_P/SIZE_P contract.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2804
/* Emit a signed 64-bit "less than, branch" comparison for 32-bit
   inferiors: high halves decide unless equal, then low halves.  See
   i386_emit_eq_goto for the OFFSET_P/SIZE_P contract.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2830
/* Emit a signed 64-bit "less or equal, branch" comparison for 32-bit
   inferiors.  See i386_emit_eq_goto for the OFFSET_P/SIZE_P
   contract.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2856
/* Emit a signed 64-bit "greater than, branch" comparison for 32-bit
   inferiors.  See i386_emit_eq_goto for the OFFSET_P/SIZE_P
   contract.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2882
/* Emit a signed 64-bit "greater or equal, branch" comparison for
   32-bit inferiors.  See i386_emit_eq_goto for the OFFSET_P/SIZE_P
   contract.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 field; must match the code above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2908
/* Bytecode-compilation callback table for 32-bit inferiors.  This is a
   positional aggregate initializer: the entry order must match the
   member order of struct emit_ops (declared elsewhere).  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2949
2950
ab64c999
TBA
/* Return the emit_ops table matching the current thread's target
   description: the amd64 table for 64-bit tdescs (on 64-bit builds),
   otherwise the i386 table.  */

emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
2961
/* Implementation of target ops method "sw_breakpoint_from_kind".
   KIND is ignored on x86: there is a single software breakpoint
   sequence (x86_breakpoint, defined earlier in this file); its length
   is returned through *SIZE.  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2970
/* x86 gdbserver supports the vCont range-stepping extension.  */

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
2976
/* Return the index (an X86_TDESC_* value) identifying the current
   thread's target description in the in-process agent's tdesc table.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  /* On 64-bit builds this return is unconditional; the i386 handling
     below is only reachable in 32-bit builds.  */
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  /* The no-XML fallback tdesc is not known to i386_get_ipa_tdesc_idx;
     map it to the plain-SSE index explicitly.  */
  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2992
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   NOTE(review): after the ongoing conversion of linux_target_ops
   entries into linux_process_target methods, only the
   get_ipa_tdesc_idx hook remains here.  */

struct linux_target_ops the_low_target =
{
  x86_get_ipa_tdesc_idx,
};
3aee8918 3000
/* The linux target ops object.  Points at the file's x86 target
   singleton so generic gdbserver code can reach the x86 methods.  */

linux_process_target *the_linux_target = &the_x86_target;
3004
/* One-time architecture initialization: build the "no XML" fallback
   target descriptions (SSE feature set only; the xmltarget string is
   presumably served to GDBs without XML tdesc support -- see users of
   these tdescs) and register the x86 regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  /* Likewise the i386 fallback description.  */
  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.998635 seconds and 4 git commands to generate.