/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				  /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
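
/* A worked example of the selector arithmetic above: on 32-bit Linux,
   %gs holds a segment selector rather than an address.  Bits 0-1 of a
   selector are the requested privilege level and bit 2 picks GDT vs.
   LDT, so shifting right by reg_thread_area (3) leaves the
   descriptor-table index that PTRACE_GET_THREAD_AREA expects, and
   desc[1] is the base-address field of the descriptor it returns.  */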


\f
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
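
/* Software breakpoints here are the single-byte int3 (0xCC), and the
   trap reports a PC one byte past it; hence the decrement of 1
   returned below.  */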
int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
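
/* To see why a fixup is needed at all: in the i386 siginfo_t layout a
   field such as a fault address is a 4-byte pointer, while the 64-bit
   layout ptrace hands us stores it as an 8-byte pointer at a
   different offset, so a plain byte copy would misplace every field
   that follows.  amd64_linux_siginfo_fixup_common (from
   nat/amd64-linux-siginfo.c) performs the field-by-field translation
   in the direction requested.  */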
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes[464..471] hold the OS-enabled
   extended state mask, which is the same as the extended control register
   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
   together with the mask saved in the xstate_hdr_bytes to determine what
   states the processor/OS supports and what state, used or initialized,
   the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

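/* For reference (the bit assignments are fixed by the XSAVE hardware
   design, not by this file): XCR0 bit 0 is x87 state, bit 1 SSE,
   bit 2 AVX, bits 3-4 the MPX bound registers and config, bits 5-7
   the AVX-512 opmask/ZMM state, and bit 9 PKRU.  An XCR0 value of 0x7
   thus means the OS has enabled x87 + SSE + AVX.  */
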
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}
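
/* For example, a GDB that understands x86 XML target descriptions
   sends a qSupported feature such as "xmlRegisters=i386" (possibly
   with several comma-separated architecture names, hence the
   strtok_r loop); seeing "i386" in that list sets use_xml, while an
   old GDB that omits it makes x86_linux_read_description fall back
   to the fixed tdesc_*_no_xml descriptions above.  */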

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
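
/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48, 0x89, 0xe6 into BUF and returns 3; strtoul stops the loop at
   the first token that is not a hexadecimal number, so the string
   must contain nothing but space-separated byte values.  */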

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1354/* Build a jump pad that saves registers and calls a collection
1355 function. Writes a jump instruction to the jump pad to
1356 JJUMPAD_INSN. The caller is responsible to write it in at the
1357 tracepoint address. */
1358
1359static int
1360i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1361 CORE_ADDR collector,
1362 CORE_ADDR lockaddr,
1363 ULONGEST orig_size,
1364 CORE_ADDR *jump_entry,
405f8e94
SS
1365 CORE_ADDR *trampoline,
1366 ULONGEST *trampoline_size,
fa593d66
PA
1367 unsigned char *jjump_pad_insn,
1368 ULONGEST *jjump_pad_insn_size,
1369 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1370 CORE_ADDR *adjusted_insn_addr_end,
1371 char *err)
fa593d66
PA
1372{
1373 unsigned char buf[0x100];
1374 int i, offset;
1375 CORE_ADDR buildaddr = *jump_entry;
1376
1377 /* Build the jump pad. */
1378
1379 /* First, do tracepoint data collection. Save registers. */
1380 i = 0;
1381 buf[i++] = 0x60; /* pushad */
1382 buf[i++] = 0x68; /* push tpaddr aka $pc */
1383 *((int *)(buf + i)) = (int) tpaddr;
1384 i += 4;
1385 buf[i++] = 0x9c; /* pushf */
1386 buf[i++] = 0x1e; /* push %ds */
1387 buf[i++] = 0x06; /* push %es */
1388 buf[i++] = 0x0f; /* push %fs */
1389 buf[i++] = 0xa0;
1390 buf[i++] = 0x0f; /* push %gs */
1391 buf[i++] = 0xa8;
1392 buf[i++] = 0x16; /* push %ss */
1393 buf[i++] = 0x0e; /* push %cs */
1394 append_insns (&buildaddr, i, buf);
1395
1396 /* Stack space for the collecting_t object. */
1397 i = 0;
1398 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1399
1400 /* Build the object. */
1401 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1402 memcpy (buf + i, &tpoint, 4);
1403 i += 4;
1404 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1405
1406 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1407 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1408 append_insns (&buildaddr, i, buf);
1409
1410 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1411 If we cared for it, this could be using xchg alternatively. */
1412
1413 i = 0;
1414 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1415 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1416 %esp,<lockaddr> */
1417 memcpy (&buf[i], (void *) &lockaddr, 4);
1418 i += 4;
1419 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1420 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1421 append_insns (&buildaddr, i, buf);
1422
1423
1424 /* Set up arguments to the gdb_collect call. */
1425 i = 0;
1426 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1427 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1428 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1429 append_insns (&buildaddr, i, buf);
1430
1431 i = 0;
1432 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1433 append_insns (&buildaddr, i, buf);
1434
1435 i = 0;
1436 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1437 memcpy (&buf[i], (void *) &tpoint, 4);
1438 i += 4;
1439 append_insns (&buildaddr, i, buf);
1440
1441 buf[0] = 0xe8; /* call <reladdr> */
1442 offset = collector - (buildaddr + sizeof (jump_insn));
1443 memcpy (buf + 1, &offset, 4);
1444 append_insns (&buildaddr, 5, buf);
1445 /* Clean up after the call. */
1446 buf[0] = 0x83; /* add $0x8,%esp */
1447 buf[1] = 0xc4;
1448 buf[2] = 0x08;
1449 append_insns (&buildaddr, 3, buf);
1450
1451
1452 /* Clear the spin-lock. This would need the LOCK prefix on older
1453 broken archs. */
1454 i = 0;
1455 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1456 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1457 memcpy (buf + i, &lockaddr, 4);
1458 i += 4;
1459 append_insns (&buildaddr, i, buf);
1460
1461
1462 /* Remove stack that had been used for the collect_t object. */
1463 i = 0;
1464 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1465 append_insns (&buildaddr, i, buf);
1466
1467 i = 0;
1468 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1469 buf[i++] = 0xc4;
1470 buf[i++] = 0x04;
1471 buf[i++] = 0x17; /* pop %ss */
1472 buf[i++] = 0x0f; /* pop %gs */
1473 buf[i++] = 0xa9;
1474 buf[i++] = 0x0f; /* pop %fs */
1475 buf[i++] = 0xa1;
1476 buf[i++] = 0x07; /* pop %es */
405f8e94 1477 buf[i++] = 0x1f; /* pop %ds */
fa593d66
PA
1478 buf[i++] = 0x9d; /* popf */
1479 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1480 buf[i++] = 0xc4;
1481 buf[i++] = 0x04;
1482 buf[i++] = 0x61; /* popad */
1483 append_insns (&buildaddr, i, buf);
1484
1485 /* Now, adjust the original instruction to execute in the jump
1486 pad. */
1487 *adjusted_insn_addr = buildaddr;
1488 relocate_instruction (&buildaddr, tpaddr);
1489 *adjusted_insn_addr_end = buildaddr;
1490
1491 /* Write the jump back to the program. */
1492 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1493 memcpy (buf, jump_insn, sizeof (jump_insn));
1494 memcpy (buf + 1, &offset, 4);
1495 append_insns (&buildaddr, sizeof (jump_insn), buf);
1496
1497 /* The jump pad is now built. Wire in a jump to our jump pad. This
1498 is always done last (by our caller actually), so that we can
1499 install fast tracepoints with threads running. This relies on
1500 the agent's atomic write support. */
405f8e94
SS
1501 if (orig_size == 4)
1502 {
1503 /* Create a trampoline. */
1504 *trampoline_size = sizeof (jump_insn);
1505 if (!claim_trampoline_space (*trampoline_size, trampoline))
1506 {
1507 /* No trampoline space available. */
1508 strcpy (err,
1509 "E.Cannot allocate trampoline space needed for fast "
1510 "tracepoints on 4-byte instructions.");
1511 return 1;
1512 }
1513
1514 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1515 memcpy (buf, jump_insn, sizeof (jump_insn));
1516 memcpy (buf + 1, &offset, 4);
4196ab2a 1517 target_write_memory (*trampoline, buf, sizeof (jump_insn));
405f8e94
SS
1518
1519 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1520 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1521 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1522 memcpy (buf + 2, &offset, 2);
1523 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1524 *jjump_pad_insn_size = sizeof (small_jump_insn);
1525 }
1526 else
1527 {
1528 /* Else use a 32-bit relative jump instruction. */
1529 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1530 memcpy (buf, jump_insn, sizeof (jump_insn));
1531 memcpy (buf + 1, &offset, 4);
1532 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1533 *jjump_pad_insn_size = sizeof (jump_insn);
1534 }
fa593d66
PA
1535
1536 /* Return the end address of our pad. */
1537 *jump_entry = buildaddr;
1538
1539 return 0;
1540}
1541
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

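/* To make the mechanism above concrete: EMIT_ASM (amd64_pop,
   "pop %rax") assembles "pop %rax" into gdbserver's own text section
   between the labels start_amd64_pop and end_amd64_pop, guarded by a
   jump over the block so gdbserver never executes it; add_insns then
   copies the bytes between the two labels into the inferior at
   current_insn_ptr.  */
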
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

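/* Reading the prologue/epilogue pair above: compiled agent-expression
   code is entered with the raw register block in %rdi and a pointer
   to the result slot in %rsi (both saved into the frame); the top of
   the expression stack is cached in %rax throughout, so the epilogue
   stores the final %rax through the saved result pointer and returns
   0 for success.  */
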
1710static void
1711amd64_emit_add (void)
1712{
1713 EMIT_ASM (amd64_add,
1714 "add (%rsp),%rax\n\t"
1715 "lea 0x8(%rsp),%rsp");
1716}
1717
1718static void
1719amd64_emit_sub (void)
1720{
1721 EMIT_ASM (amd64_sub,
1722 "sub %rax,(%rsp)\n\t"
1723 "pop %rax");
1724}
1725
1726static void
1727amd64_emit_mul (void)
1728{
1729 emit_error = 1;
1730}
1731
1732static void
1733amd64_emit_lsh (void)
1734{
1735 emit_error = 1;
1736}
1737
1738static void
1739amd64_emit_rsh_signed (void)
1740{
1741 emit_error = 1;
1742}
1743
1744static void
1745amd64_emit_rsh_unsigned (void)
1746{
1747 emit_error = 1;
1748}
1749
1750static void
1751amd64_emit_ext (int arg)
1752{
1753 switch (arg)
1754 {
1755 case 8:
1756 EMIT_ASM (amd64_ext_8,
1757 "cbtw\n\t"
1758 "cwtl\n\t"
1759 "cltq");
1760 break;
1761 case 16:
1762 EMIT_ASM (amd64_ext_16,
1763 "cwtl\n\t"
1764 "cltq");
1765 break;
1766 case 32:
1767 EMIT_ASM (amd64_ext_32,
1768 "cltq");
1769 break;
1770 default:
1771 emit_error = 1;
1772 }
1773}
1774
1775static void
1776amd64_emit_log_not (void)
1777{
1778 EMIT_ASM (amd64_log_not,
1779 "test %rax,%rax\n\t"
1780 "sete %cl\n\t"
1781 "movzbq %cl,%rax");
1782}
1783
1784static void
1785amd64_emit_bit_and (void)
1786{
1787 EMIT_ASM (amd64_and,
1788 "and (%rsp),%rax\n\t"
1789 "lea 0x8(%rsp),%rsp");
1790}
1791
1792static void
1793amd64_emit_bit_or (void)
1794{
1795 EMIT_ASM (amd64_or,
1796 "or (%rsp),%rax\n\t"
1797 "lea 0x8(%rsp),%rsp");
1798}
1799
1800static void
1801amd64_emit_bit_xor (void)
1802{
1803 EMIT_ASM (amd64_xor,
1804 "xor (%rsp),%rax\n\t"
1805 "lea 0x8(%rsp),%rsp");
1806}
1807
1808static void
1809amd64_emit_bit_not (void)
1810{
1811 EMIT_ASM (amd64_bit_not,
1812 "xorq $0xffffffffffffffff,%rax");
1813}
1814
1815static void
1816amd64_emit_equal (void)
1817{
1818 EMIT_ASM (amd64_equal,
1819 "cmp %rax,(%rsp)\n\t"
1820 "je .Lamd64_equal_true\n\t"
1821 "xor %rax,%rax\n\t"
1822 "jmp .Lamd64_equal_end\n\t"
1823 ".Lamd64_equal_true:\n\t"
1824 "mov $0x1,%rax\n\t"
1825 ".Lamd64_equal_end:\n\t"
1826 "lea 0x8(%rsp),%rsp");
1827}
1828
1829static void
1830amd64_emit_less_signed (void)
1831{
1832 EMIT_ASM (amd64_less_signed,
1833 "cmp %rax,(%rsp)\n\t"
1834 "jl .Lamd64_less_signed_true\n\t"
1835 "xor %rax,%rax\n\t"
1836 "jmp .Lamd64_less_signed_end\n\t"
1837 ".Lamd64_less_signed_true:\n\t"
1838 "mov $1,%rax\n\t"
1839 ".Lamd64_less_signed_end:\n\t"
1840 "lea 0x8(%rsp),%rsp");
1841}
1842
1843static void
1844amd64_emit_less_unsigned (void)
1845{
1846 EMIT_ASM (amd64_less_unsigned,
1847 "cmp %rax,(%rsp)\n\t"
1848 "jb .Lamd64_less_unsigned_true\n\t"
1849 "xor %rax,%rax\n\t"
1850 "jmp .Lamd64_less_unsigned_end\n\t"
1851 ".Lamd64_less_unsigned_true:\n\t"
1852 "mov $1,%rax\n\t"
1853 ".Lamd64_less_unsigned_end:\n\t"
1854 "lea 0x8(%rsp),%rsp");
1855}
1856
1857static void
1858amd64_emit_ref (int size)
1859{
1860 switch (size)
1861 {
1862 case 1:
1863 EMIT_ASM (amd64_ref1,
1864 "movb (%rax),%al");
1865 break;
1866 case 2:
1867 EMIT_ASM (amd64_ref2,
1868 "movw (%rax),%ax");
1869 break;
1870 case 4:
1871 EMIT_ASM (amd64_ref4,
1872 "movl (%rax),%eax");
1873 break;
1874 case 8:
1875 EMIT_ASM (amd64_ref8,
1876 "movq (%rax),%rax");
1877 break;
1878 }
1879}
1880
1881static void
1882amd64_emit_if_goto (int *offset_p, int *size_p)
1883{
1884 EMIT_ASM (amd64_if_goto,
1885 "mov %rax,%rcx\n\t"
1886 "pop %rax\n\t"
1887 "cmp $0,%rcx\n\t"
1888 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1889 if (offset_p)
1890 *offset_p = 10;
1891 if (size_p)
1892 *size_p = 4;
1893}
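/* The sequence above encodes as mov %rax,%rcx (3 bytes), pop %rax
   (1 byte), cmp $0,%rcx (4 bytes), then the two jne opcode bytes
   0x0f 0x85, so the 4-byte displacement that amd64_write_goto_address
   later patches starts 10 bytes in -- hence *offset_p = 10.  */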
1894
1895static void
1896amd64_emit_goto (int *offset_p, int *size_p)
1897{
1898 EMIT_ASM (amd64_goto,
1899 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1900 if (offset_p)
1901 *offset_p = 1;
1902 if (size_p)
1903 *size_p = 4;
1904}
1905
1906static void
1907amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1908{
1909 int diff = (to - (from + size));
1910 unsigned char buf[sizeof (int)];
1911
1912 if (size != 4)
1913 {
1914 emit_error = 1;
1915 return;
1916 }
1917
1918 memcpy (buf, &diff, sizeof (int));
1919 target_write_memory (from, buf, sizeof (int));
1920}
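/* A worked example (addresses illustrative only): with the 4-byte
   displacement field at FROM = 0x1000 and a target TO = 0x1080, the
   value stored is 0x1080 - (0x1000 + 4) = 0x7c, because the CPU adds
   the displacement to the address of the byte following the field.  */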
1921
1922static void
1923amd64_emit_const (LONGEST num)
1924{
1925 unsigned char buf[16];
1926 int i;
1927 CORE_ADDR buildaddr = current_insn_ptr;
1928
1929 i = 0;
1930 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1931 memcpy (&buf[i], &num, sizeof (num));
1932 i += 8;
1933 append_insns (&buildaddr, i, buf);
1934 current_insn_ptr = buildaddr;
1935}
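/* The 0x48 0xb8 pair above is REX.W plus the mov-immediate-to-%rax
   opcode (movabs), followed by the eight little-endian immediate
   bytes: a 10-byte instruction that loads any 64-bit constant.  */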
1936
1937static void
1938amd64_emit_call (CORE_ADDR fn)
1939{
1940 unsigned char buf[16];
1941 int i;
1942 CORE_ADDR buildaddr;
1943 LONGEST offset64;
1944
1945 /* The destination function, being in a shared library, may be more
1946 than 31 bits away from the compiled code pad. */
1947
1948 buildaddr = current_insn_ptr;
1949
1950 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1951
1952 i = 0;
1953
1954 if (offset64 > INT_MAX || offset64 < INT_MIN)
1955 {
1956 /* Offset is too large for a direct call, so load the address into
1957 a call-clobbered scratch register and use an indirect callq; no
1958 push/pop is needed. (The bytes below encode %rdx.) */
1959 buf[i++] = 0x48; /* movabs $fn,%rdx */
1960 buf[i++] = 0xba;
1961 memcpy (buf + i, &fn, 8);
1962 i += 8;
1963 buf[i++] = 0xff; /* callq *%rdx */
1964 buf[i++] = 0xd2;
1965 }
1966 else
1967 {
1968 int offset32 = offset64; /* we know we can't overflow here. */
1969
1970 buf[i++] = 0xe8; /* call <reladdr> */
1971 memcpy (buf + i, &offset32, 4);
1972 i += 4;
1973 }
1974
1975 append_insns (&buildaddr, i, buf);
1976 current_insn_ptr = buildaddr;
1977}
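/* OFFSET64 is measured from the end of the 5-byte 0xe8 form, so the
   INT_MAX/INT_MIN test above is exactly the reach check for a rel32
   call; only when it fails do we pay for the 12-byte indirect form.  */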
1978
1979static void
1980amd64_emit_reg (int reg)
1981{
1982 unsigned char buf[16];
1983 int i;
1984 CORE_ADDR buildaddr;
1985
1986 /* Assume raw_regs is still in %rdi. */
1987 buildaddr = current_insn_ptr;
1988 i = 0;
1989 buf[i++] = 0xbe; /* mov $<n>,%esi */
1990 memcpy (&buf[i], &reg, sizeof (reg));
1991 i += 4;
1992 append_insns (&buildaddr, i, buf);
1993 current_insn_ptr = buildaddr;
1994 amd64_emit_call (get_raw_reg_func_addr ());
1995}
1996
1997static void
1998amd64_emit_pop (void)
1999{
2000 EMIT_ASM (amd64_pop,
2001 "pop %rax");
2002}
2003
2004static void
2005amd64_emit_stack_flush (void)
2006{
2007 EMIT_ASM (amd64_stack_flush,
2008 "push %rax");
2009}
2010
2011static void
2012amd64_emit_zero_ext (int arg)
2013{
2014 switch (arg)
2015 {
2016 case 8:
2017 EMIT_ASM (amd64_zero_ext_8,
2018 "and $0xff,%rax");
2019 break;
2020 case 16:
2021 EMIT_ASM (amd64_zero_ext_16,
2022 "and $0xffff,%rax");
2023 break;
2024 case 32:
2025 EMIT_ASM (amd64_zero_ext_32,
2026 "mov $0xffffffff,%rcx\n\t"
2027 "and %rcx,%rax");
2028 break;
2029 default:
2030 emit_error = 1;
2031 }
2032}
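/* The 32-bit case goes through %rcx because an "and $0xffffffff,%rax"
   immediate would be sign-extended to all-ones and mask nothing; an
   equivalent alternative would be "mov %eax,%eax", which zero-extends
   into the upper half.  */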
2033
2034static void
2035amd64_emit_swap (void)
2036{
2037 EMIT_ASM (amd64_swap,
2038 "mov %rax,%rcx\n\t"
2039 "pop %rax\n\t"
2040 "push %rcx");
2041}
2042
2043static void
2044amd64_emit_stack_adjust (int n)
2045{
2046 unsigned char buf[16];
2047 int i;
2048 CORE_ADDR buildaddr = current_insn_ptr;
2049
2050 i = 0;
2051 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2052 buf[i++] = 0x8d;
2053 buf[i++] = 0x64;
2054 buf[i++] = 0x24;
2055 /* This only handles adjustments up to 15, but we don't expect any more. */
2056 buf[i++] = n * 8;
2057 append_insns (&buildaddr, i, buf);
2058 current_insn_ptr = buildaddr;
2059}
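/* The adjustment N*8 lands in the single signed disp8 byte of the lea
   encoding above, so this form tops out at N = 15 (120 bytes); in
   practice N stays far below that.  */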
2060
2061/* FN's prototype is `LONGEST(*fn)(int)'. */
2062
2063static void
2064amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2065{
2066 unsigned char buf[16];
2067 int i;
2068 CORE_ADDR buildaddr;
2069
2070 buildaddr = current_insn_ptr;
2071 i = 0;
2072 buf[i++] = 0xbf; /* movl $<n>,%edi */
2073 memcpy (&buf[i], &arg1, sizeof (arg1));
2074 i += 4;
2075 append_insns (&buildaddr, i, buf);
2076 current_insn_ptr = buildaddr;
2077 amd64_emit_call (fn);
2078}
2079
2080/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2081
2082static void
2083amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2084{
2085 unsigned char buf[16];
2086 int i;
2087 CORE_ADDR buildaddr;
2088
2089 buildaddr = current_insn_ptr;
2090 i = 0;
2091 buf[i++] = 0xbf; /* movl $<n>,%edi */
2092 memcpy (&buf[i], &arg1, sizeof (arg1));
2093 i += 4;
2094 append_insns (&buildaddr, i, buf);
2095 current_insn_ptr = buildaddr;
2096 EMIT_ASM (amd64_void_call_2_a,
2097 /* Save away a copy of the stack top. */
2098 "push %rax\n\t"
2099 /* Also pass top as the second argument. */
2100 "mov %rax,%rsi");
2101 amd64_emit_call (fn);
2102 EMIT_ASM (amd64_void_call_2_b,
2103 /* Restore the stack top; %rax may have been trashed. */
2104 "pop %rax");
2105}
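/* Per the SysV AMD64 calling convention, ARG1 travels in %edi (loaded
   by the movl emitted above) and the saved top-of-stack value in
   %rsi, matching FN's (int, LONGEST) prototype.  */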
2106
2107static void
2108amd64_emit_eq_goto (int *offset_p, int *size_p)
2109{
2110 EMIT_ASM (amd64_eq,
2111 "cmp %rax,(%rsp)\n\t"
2112 "jne .Lamd64_eq_fallthru\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax\n\t"
2115 /* jmp, but don't trust the assembler to choose the right jump */
2116 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2117 ".Lamd64_eq_fallthru:\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2119 "pop %rax");
2120
2121 if (offset_p)
2122 *offset_p = 13;
2123 if (size_p)
2124 *size_p = 4;
2125}
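/* Offset 13 breaks down as cmp %rax,(%rsp) (4 bytes), short jne (2),
   lea 0x8(%rsp),%rsp (5), pop %rax (1) and the 0xe9 opcode (1); the
   same arithmetic holds for the ne/lt/le/gt/ge variants below.  */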
2126
2127static void
2128amd64_emit_ne_goto (int *offset_p, int *size_p)
2129{
2130 EMIT_ASM (amd64_ne,
2131 "cmp %rax,(%rsp)\n\t"
2132 "je .Lamd64_ne_fallthru\n\t"
2133 "lea 0x8(%rsp),%rsp\n\t"
2134 "pop %rax\n\t"
2135 /* jmp, but don't trust the assembler to choose the right jump */
2136 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2137 ".Lamd64_ne_fallthru:\n\t"
2138 "lea 0x8(%rsp),%rsp\n\t"
2139 "pop %rax");
2140
2141 if (offset_p)
2142 *offset_p = 13;
2143 if (size_p)
2144 *size_p = 4;
2145}
2146
2147static void
2148amd64_emit_lt_goto (int *offset_p, int *size_p)
2149{
2150 EMIT_ASM (amd64_lt,
2151 "cmp %rax,(%rsp)\n\t"
2152 "jnl .Lamd64_lt_fallthru\n\t"
2153 "lea 0x8(%rsp),%rsp\n\t"
2154 "pop %rax\n\t"
2155 /* jmp, but don't trust the assembler to choose the right jump */
2156 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2157 ".Lamd64_lt_fallthru:\n\t"
2158 "lea 0x8(%rsp),%rsp\n\t"
2159 "pop %rax");
2160
2161 if (offset_p)
2162 *offset_p = 13;
2163 if (size_p)
2164 *size_p = 4;
2165}
2166
2167static void
2168amd64_emit_le_goto (int *offset_p, int *size_p)
2169{
2170 EMIT_ASM (amd64_le,
2171 "cmp %rax,(%rsp)\n\t"
2172 "jnle .Lamd64_le_fallthru\n\t"
2173 "lea 0x8(%rsp),%rsp\n\t"
2174 "pop %rax\n\t"
2175 /* jmp, but don't trust the assembler to choose the right jump */
2176 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2177 ".Lamd64_le_fallthru:\n\t"
2178 "lea 0x8(%rsp),%rsp\n\t"
2179 "pop %rax");
2180
2181 if (offset_p)
2182 *offset_p = 13;
2183 if (size_p)
2184 *size_p = 4;
2185}
2186
2187static void
2188amd64_emit_gt_goto (int *offset_p, int *size_p)
2189{
2190 EMIT_ASM (amd64_gt,
2191 "cmp %rax,(%rsp)\n\t"
2192 "jng .Lamd64_gt_fallthru\n\t"
2193 "lea 0x8(%rsp),%rsp\n\t"
2194 "pop %rax\n\t"
2195 /* jmp, but don't trust the assembler to choose the right jump */
2196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2197 ".Lamd64_gt_fallthru:\n\t"
2198 "lea 0x8(%rsp),%rsp\n\t"
2199 "pop %rax");
2200
2201 if (offset_p)
2202 *offset_p = 13;
2203 if (size_p)
2204 *size_p = 4;
2205}
2206
2207static void
2208amd64_emit_ge_goto (int *offset_p, int *size_p)
2209{
2210 EMIT_ASM (amd64_ge,
2211 "cmp %rax,(%rsp)\n\t"
2212 "jnge .Lamd64_ge_fallthru\n\t"
2213 ".Lamd64_ge_jump:\n\t"
2214 "lea 0x8(%rsp),%rsp\n\t"
2215 "pop %rax\n\t"
2216 /* jmp, but don't trust the assembler to choose the right jump */
2217 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2218 ".Lamd64_ge_fallthru:\n\t"
2219 "lea 0x8(%rsp),%rsp\n\t"
2220 "pop %rax");
2221
2222 if (offset_p)
2223 *offset_p = 13;
2224 if (size_p)
2225 *size_p = 4;
2226}
2227
2228struct emit_ops amd64_emit_ops =
2229 {
2230 amd64_emit_prologue,
2231 amd64_emit_epilogue,
2232 amd64_emit_add,
2233 amd64_emit_sub,
2234 amd64_emit_mul,
2235 amd64_emit_lsh,
2236 amd64_emit_rsh_signed,
2237 amd64_emit_rsh_unsigned,
2238 amd64_emit_ext,
2239 amd64_emit_log_not,
2240 amd64_emit_bit_and,
2241 amd64_emit_bit_or,
2242 amd64_emit_bit_xor,
2243 amd64_emit_bit_not,
2244 amd64_emit_equal,
2245 amd64_emit_less_signed,
2246 amd64_emit_less_unsigned,
2247 amd64_emit_ref,
2248 amd64_emit_if_goto,
2249 amd64_emit_goto,
2250 amd64_write_goto_address,
2251 amd64_emit_const,
2252 amd64_emit_call,
2253 amd64_emit_reg,
2254 amd64_emit_pop,
2255 amd64_emit_stack_flush,
2256 amd64_emit_zero_ext,
2257 amd64_emit_swap,
2258 amd64_emit_stack_adjust,
2259 amd64_emit_int_call_1,
2260 amd64_emit_void_call_2,
2261 amd64_emit_eq_goto,
2262 amd64_emit_ne_goto,
2263 amd64_emit_lt_goto,
2264 amd64_emit_le_goto,
2265 amd64_emit_gt_goto,
2266 amd64_emit_ge_goto
2267 };
2268
2269#endif /* __x86_64__ */
2270
2271static void
2272i386_emit_prologue (void)
2273{
2274 EMIT_ASM32 (i386_prologue,
2275 "push %ebp\n\t"
2276 "mov %esp,%ebp\n\t"
2277 "push %ebx");
2278 /* At this point, the raw regs base address is at 8(%ebp), and the
2279 value pointer is at 12(%ebp). */
2280}
2281
2282static void
2283i386_emit_epilogue (void)
2284{
2285 EMIT_ASM32 (i386_epilogue,
2286 "mov 12(%ebp),%ecx\n\t"
2287 "mov %eax,(%ecx)\n\t"
2288 "mov %ebx,0x4(%ecx)\n\t"
2289 "xor %eax,%eax\n\t"
2290 "pop %ebx\n\t"
2291 "pop %ebp\n\t"
2292 "ret");
2293}
2294
2295static void
2296i386_emit_add (void)
2297{
2298 EMIT_ASM32 (i386_add,
2299 "add (%esp),%eax\n\t"
2300 "adc 0x4(%esp),%ebx\n\t"
2301 "lea 0x8(%esp),%esp");
2302}
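/* On i386 the 64-bit top-of-stack value lives split across %ebx:%eax,
   so the add/adc pair above performs a full 64-bit addition: add sums
   the low words and adc folds the resulting carry into the high
   words.  */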
2303
2304static void
2305i386_emit_sub (void)
2306{
2307 EMIT_ASM32 (i386_sub,
2308 "subl %eax,(%esp)\n\t"
2309 "sbbl %ebx,4(%esp)\n\t"
2310 "pop %eax\n\t"
2311 "pop %ebx\n\t");
2312}
2313
2314static void
2315i386_emit_mul (void)
2316{
2317 emit_error = 1;
2318}
2319
2320static void
2321i386_emit_lsh (void)
2322{
2323 emit_error = 1;
2324}
2325
2326static void
2327i386_emit_rsh_signed (void)
2328{
2329 emit_error = 1;
2330}
2331
2332static void
2333i386_emit_rsh_unsigned (void)
2334{
2335 emit_error = 1;
2336}
2337
2338static void
2339i386_emit_ext (int arg)
2340{
2341 switch (arg)
2342 {
2343 case 8:
2344 EMIT_ASM32 (i386_ext_8,
2345 "cbtw\n\t"
2346 "cwtl\n\t"
2347 "movl %eax,%ebx\n\t"
2348 "sarl $31,%ebx");
2349 break;
2350 case 16:
2351 EMIT_ASM32 (i386_ext_16,
2352 "cwtl\n\t"
2353 "movl %eax,%ebx\n\t"
2354 "sarl $31,%ebx");
2355 break;
2356 case 32:
2357 EMIT_ASM32 (i386_ext_32,
2358 "movl %eax,%ebx\n\t"
2359 "sarl $31,%ebx");
2360 break;
2361 default:
2362 emit_error = 1;
2363 }
2364}
2365
2366static void
2367i386_emit_log_not (void)
2368{
2369 EMIT_ASM32 (i386_log_not,
2370 "or %ebx,%eax\n\t"
2371 "test %eax,%eax\n\t"
2372 "sete %cl\n\t"
2373 "xor %ebx,%ebx\n\t"
2374 "movzbl %cl,%eax");
2375}
2376
2377static void
2378i386_emit_bit_and (void)
2379{
2380 EMIT_ASM32 (i386_and,
2381 "and (%esp),%eax\n\t"
2382 "and 0x4(%esp),%ebx\n\t"
2383 "lea 0x8(%esp),%esp");
2384}
2385
2386static void
2387i386_emit_bit_or (void)
2388{
2389 EMIT_ASM32 (i386_or,
2390 "or (%esp),%eax\n\t"
2391 "or 0x4(%esp),%ebx\n\t"
2392 "lea 0x8(%esp),%esp");
2393}
2394
2395static void
2396i386_emit_bit_xor (void)
2397{
2398 EMIT_ASM32 (i386_xor,
2399 "xor (%esp),%eax\n\t"
2400 "xor 0x4(%esp),%ebx\n\t"
2401 "lea 0x8(%esp),%esp");
2402}
2403
2404static void
2405i386_emit_bit_not (void)
2406{
2407 EMIT_ASM32 (i386_bit_not,
2408 "xor $0xffffffff,%eax\n\t"
2409 "xor $0xffffffff,%ebx\n\t");
2410}
2411
2412static void
2413i386_emit_equal (void)
2414{
2415 EMIT_ASM32 (i386_equal,
2416 "cmpl %ebx,4(%esp)\n\t"
2417 "jne .Li386_equal_false\n\t"
2418 "cmpl %eax,(%esp)\n\t"
2419 "je .Li386_equal_true\n\t"
2420 ".Li386_equal_false:\n\t"
2421 "xor %eax,%eax\n\t"
2422 "jmp .Li386_equal_end\n\t"
2423 ".Li386_equal_true:\n\t"
2424 "mov $1,%eax\n\t"
2425 ".Li386_equal_end:\n\t"
2426 "xor %ebx,%ebx\n\t"
2427 "lea 0x8(%esp),%esp");
2428}
2429
2430static void
2431i386_emit_less_signed (void)
2432{
2433 EMIT_ASM32 (i386_less_signed,
2434 "cmpl %ebx,4(%esp)\n\t"
2435 "jl .Li386_less_signed_true\n\t"
2436 "jne .Li386_less_signed_false\n\t"
2437 "cmpl %eax,(%esp)\n\t"
2438 "jl .Li386_less_signed_true\n\t"
2439 ".Li386_less_signed_false:\n\t"
2440 "xor %eax,%eax\n\t"
2441 "jmp .Li386_less_signed_end\n\t"
2442 ".Li386_less_signed_true:\n\t"
2443 "mov $1,%eax\n\t"
2444 ".Li386_less_signed_end:\n\t"
2445 "xor %ebx,%ebx\n\t"
2446 "lea 0x8(%esp),%esp");
2447}
2448
2449static void
2450i386_emit_less_unsigned (void)
2451{
2452 EMIT_ASM32 (i386_less_unsigned,
2453 "cmpl %ebx,4(%esp)\n\t"
2454 "jb .Li386_less_unsigned_true\n\t"
2455 "jne .Li386_less_unsigned_false\n\t"
2456 "cmpl %eax,(%esp)\n\t"
2457 "jb .Li386_less_unsigned_true\n\t"
2458 ".Li386_less_unsigned_false:\n\t"
2459 "xor %eax,%eax\n\t"
2460 "jmp .Li386_less_unsigned_end\n\t"
2461 ".Li386_less_unsigned_true:\n\t"
2462 "mov $1,%eax\n\t"
2463 ".Li386_less_unsigned_end:\n\t"
2464 "xor %ebx,%ebx\n\t"
2465 "lea 0x8(%esp),%esp");
2466}
2467
2468static void
2469i386_emit_ref (int size)
2470{
2471 switch (size)
2472 {
2473 case 1:
2474 EMIT_ASM32 (i386_ref1,
2475 "movb (%eax),%al");
2476 break;
2477 case 2:
2478 EMIT_ASM32 (i386_ref2,
2479 "movw (%eax),%ax");
2480 break;
2481 case 4:
2482 EMIT_ASM32 (i386_ref4,
2483 "movl (%eax),%eax");
2484 break;
2485 case 8:
2486 EMIT_ASM32 (i386_ref8,
2487 "movl 4(%eax),%ebx\n\t"
2488 "movl (%eax),%eax");
2489 break;
2490 }
2491}
2492
2493static void
2494i386_emit_if_goto (int *offset_p, int *size_p)
2495{
2496 EMIT_ASM32 (i386_if_goto,
2497 "mov %eax,%ecx\n\t"
2498 "or %ebx,%ecx\n\t"
2499 "pop %eax\n\t"
2500 "pop %ebx\n\t"
2501 "cmpl $0,%ecx\n\t"
2502 /* Don't trust the assembler to choose the right jump */
2503 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2504
2505 if (offset_p)
2506 *offset_p = 11; /* be sure that this matches the sequence above */
2507 if (size_p)
2508 *size_p = 4;
2509}
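/* The value 11 checks out against the sequence: mov %eax,%ecx
   (2 bytes), or %ebx,%ecx (2), pop %eax (1), pop %ebx (1),
   cmpl $0,%ecx (3) and the two jne opcode bytes 0x0f 0x85 put the
   displacement 11 bytes in.  */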
2510
2511static void
2512i386_emit_goto (int *offset_p, int *size_p)
2513{
2514 EMIT_ASM32 (i386_goto,
2515 /* Don't trust the assembler to choose the right jump */
2516 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2517 if (offset_p)
2518 *offset_p = 1;
2519 if (size_p)
2520 *size_p = 4;
2521}
2522
2523static void
2524i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2525{
2526 int diff = (to - (from + size));
2527 unsigned char buf[sizeof (int)];
2528
2529 /* We're only doing 4-byte sizes at the moment. */
2530 if (size != 4)
2531 {
2532 emit_error = 1;
2533 return;
2534 }
2535
2536 memcpy (buf, &diff, sizeof (int));
2537 target_write_memory (from, buf, sizeof (int));
2538}
2539
2540static void
2541i386_emit_const (LONGEST num)
2542{
2543 unsigned char buf[16];
2544 int i, hi, lo;
2545 CORE_ADDR buildaddr = current_insn_ptr;
2546
2547 i = 0;
2548 buf[i++] = 0xb8; /* mov $<n>,%eax */
2549 lo = num & 0xffffffff;
2550 memcpy (&buf[i], &lo, sizeof (lo));
2551 i += 4;
2552 hi = ((num >> 32) & 0xffffffff);
2553 if (hi)
2554 {
2555 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2556 memcpy (&buf[i], &hi, sizeof (hi));
2557 i += 4;
2558 }
2559 else
2560 {
2561 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2562 }
2563 append_insns (&buildaddr, i, buf);
2564 current_insn_ptr = buildaddr;
2565}
2566
2567static void
2568i386_emit_call (CORE_ADDR fn)
2569{
2570 unsigned char buf[16];
2571 int i, offset;
2572 CORE_ADDR buildaddr;
2573
2574 buildaddr = current_insn_ptr;
2575 i = 0;
2576 buf[i++] = 0xe8; /* call <reladdr> */
2577 offset = ((int) fn) - (buildaddr + 5);
2578 memcpy (buf + 1, &offset, 4);
2579 append_insns (&buildaddr, 5, buf);
2580 current_insn_ptr = buildaddr;
2581}
2582
2583static void
2584i386_emit_reg (int reg)
2585{
2586 unsigned char buf[16];
2587 int i;
2588 CORE_ADDR buildaddr;
2589
2590 EMIT_ASM32 (i386_reg_a,
2591 "sub $0x8,%esp");
2592 buildaddr = current_insn_ptr;
2593 i = 0;
2594 buf[i++] = 0xb8; /* mov $<n>,%eax */
2595 memcpy (&buf[i], &reg, sizeof (reg));
2596 i += 4;
2597 append_insns (&buildaddr, i, buf);
2598 current_insn_ptr = buildaddr;
2599 EMIT_ASM32 (i386_reg_b,
2600 "mov %eax,4(%esp)\n\t"
2601 "mov 8(%ebp),%eax\n\t"
2602 "mov %eax,(%esp)");
2603 i386_emit_call (get_raw_reg_func_addr ());
2604 EMIT_ASM32 (i386_reg_c,
2605 "xor %ebx,%ebx\n\t"
2606 "lea 0x8(%esp),%esp");
2607}
2608
2609static void
2610i386_emit_pop (void)
2611{
2612 EMIT_ASM32 (i386_pop,
2613 "pop %eax\n\t"
2614 "pop %ebx");
2615}
2616
2617static void
2618i386_emit_stack_flush (void)
2619{
2620 EMIT_ASM32 (i386_stack_flush,
2621 "push %ebx\n\t"
2622 "push %eax");
2623}
2624
2625static void
2626i386_emit_zero_ext (int arg)
2627{
2628 switch (arg)
2629 {
2630 case 8:
2631 EMIT_ASM32 (i386_zero_ext_8,
2632 "and $0xff,%eax\n\t"
2633 "xor %ebx,%ebx");
2634 break;
2635 case 16:
2636 EMIT_ASM32 (i386_zero_ext_16,
2637 "and $0xffff,%eax\n\t"
2638 "xor %ebx,%ebx");
2639 break;
2640 case 32:
2641 EMIT_ASM32 (i386_zero_ext_32,
2642 "xor %ebx,%ebx");
2643 break;
2644 default:
2645 emit_error = 1;
2646 }
2647}
2648
2649static void
2650i386_emit_swap (void)
2651{
2652 EMIT_ASM32 (i386_swap,
2653 "mov %eax,%ecx\n\t"
2654 "mov %ebx,%edx\n\t"
2655 "pop %eax\n\t"
2656 "pop %ebx\n\t"
2657 "push %edx\n\t"
2658 "push %ecx");
2659}
2660
2661static void
2662i386_emit_stack_adjust (int n)
2663{
2664 unsigned char buf[16];
2665 int i;
2666 CORE_ADDR buildaddr = current_insn_ptr;
2667
2668 i = 0;
2669 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2670 buf[i++] = 0x64;
2671 buf[i++] = 0x24;
2672 buf[i++] = n * 8;
2673 append_insns (&buildaddr, i, buf);
2674 current_insn_ptr = buildaddr;
2675}
2676
2677/* FN's prototype is `LONGEST(*fn)(int)'. */
2678
2679static void
2680i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2681{
2682 unsigned char buf[16];
2683 int i;
2684 CORE_ADDR buildaddr;
2685
2686 EMIT_ASM32 (i386_int_call_1_a,
2687 /* Reserve a bit of stack space. */
2688 "sub $0x8,%esp");
2689 /* Put the one argument on the stack. */
2690 buildaddr = current_insn_ptr;
2691 i = 0;
2692 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2693 buf[i++] = 0x04;
2694 buf[i++] = 0x24;
2695 memcpy (&buf[i], &arg1, sizeof (arg1));
2696 i += 4;
2697 append_insns (&buildaddr, i, buf);
2698 current_insn_ptr = buildaddr;
2699 i386_emit_call (fn);
2700 EMIT_ASM32 (i386_int_call_1_c,
2701 "mov %edx,%ebx\n\t"
2702 "lea 0x8(%esp),%esp");
2703}
2704
2705/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2706
2707static void
2708i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2709{
2710 unsigned char buf[16];
2711 int i;
2712 CORE_ADDR buildaddr;
2713
2714 EMIT_ASM32 (i386_void_call_2_a,
2715 /* Preserve %eax only; we don't have to worry about %ebx. */
2716 "push %eax\n\t"
2717 /* Reserve a bit of stack space for arguments. */
2718 "sub $0x10,%esp\n\t"
2719 /* Copy "top" to the second argument position. (Note that
2720 we can't assume the function won't scribble on its
2721 arguments, so don't try to restore from this.) */
2722 "mov %eax,4(%esp)\n\t"
2723 "mov %ebx,8(%esp)");
2724 /* Put the first argument on the stack. */
2725 buildaddr = current_insn_ptr;
2726 i = 0;
2727 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2728 buf[i++] = 0x04;
2729 buf[i++] = 0x24;
2730 memcpy (&buf[i], &arg1, sizeof (arg1));
2731 i += 4;
2732 append_insns (&buildaddr, i, buf);
2733 current_insn_ptr = buildaddr;
2734 i386_emit_call (fn);
2735 EMIT_ASM32 (i386_void_call_2_b,
2736 "lea 0x10(%esp),%esp\n\t"
2737 /* Restore original stack top. */
2738 "pop %eax");
2739}
2740
2741
2742static void
2743i386_emit_eq_goto (int *offset_p, int *size_p)
2744{
2745 EMIT_ASM32 (eq,
2746 /* Check low half first, more likely to be decider */
2747 "cmpl %eax,(%esp)\n\t"
2748 "jne .Leq_fallthru\n\t"
2749 "cmpl %ebx,4(%esp)\n\t"
2750 "jne .Leq_fallthru\n\t"
2751 "lea 0x8(%esp),%esp\n\t"
2752 "pop %eax\n\t"
2753 "pop %ebx\n\t"
2754 /* jmp, but don't trust the assembler to choose the right jump */
2755 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2756 ".Leq_fallthru:\n\t"
2757 "lea 0x8(%esp),%esp\n\t"
2758 "pop %eax\n\t"
2759 "pop %ebx");
2760
2761 if (offset_p)
2762 *offset_p = 18;
2763 if (size_p)
2764 *size_p = 4;
2765}
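/* Offset 18 here is cmpl %eax,(%esp) (3 bytes), short jne (2),
   cmpl %ebx,4(%esp) (4), short jne (2), lea (4), pop %eax (1),
   pop %ebx (1) plus the 0xe9 opcode (1).  */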
2766
2767static void
2768i386_emit_ne_goto (int *offset_p, int *size_p)
2769{
2770 EMIT_ASM32 (ne,
2771 /* Check low half first, more likely to be decider */
2772 "cmpl %eax,(%esp)\n\t"
2773 "jne .Lne_jump\n\t"
2774 "cmpl %ebx,4(%esp)\n\t"
2775 "je .Lne_fallthru\n\t"
2776 ".Lne_jump:\n\t"
2777 "lea 0x8(%esp),%esp\n\t"
2778 "pop %eax\n\t"
2779 "pop %ebx\n\t"
2780 /* jmp, but don't trust the assembler to choose the right jump */
2781 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2782 ".Lne_fallthru:\n\t"
2783 "lea 0x8(%esp),%esp\n\t"
2784 "pop %eax\n\t"
2785 "pop %ebx");
2786
2787 if (offset_p)
2788 *offset_p = 18;
2789 if (size_p)
2790 *size_p = 4;
2791}
2792
2793static void
2794i386_emit_lt_goto (int *offset_p, int *size_p)
2795{
2796 EMIT_ASM32 (lt,
2797 "cmpl %ebx,4(%esp)\n\t"
2798 "jl .Llt_jump\n\t"
2799 "jne .Llt_fallthru\n\t"
2800 "cmpl %eax,(%esp)\n\t"
2801 "jnl .Llt_fallthru\n\t"
2802 ".Llt_jump:\n\t"
2803 "lea 0x8(%esp),%esp\n\t"
2804 "pop %eax\n\t"
2805 "pop %ebx\n\t"
2806 /* jmp, but don't trust the assembler to choose the right jump */
2807 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2808 ".Llt_fallthru:\n\t"
2809 "lea 0x8(%esp),%esp\n\t"
2810 "pop %eax\n\t"
2811 "pop %ebx");
2812
2813 if (offset_p)
2814 *offset_p = 20;
2815 if (size_p)
2816 *size_p = 4;
2817}
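/* The lt/le/gt/ge variants start with the 4-byte cmpl %ebx,4(%esp)
   and take two short branches before the 3-byte low-word compare,
   giving 4+2+2+3+2+4+1+1+1 = 20 bytes up to the displacement.  */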
2818
2819static void
2820i386_emit_le_goto (int *offset_p, int *size_p)
2821{
2822 EMIT_ASM32 (le,
2823 "cmpl %ebx,4(%esp)\n\t"
2824 "jle .Lle_jump\n\t"
2825 "jne .Lle_fallthru\n\t"
2826 "cmpl %eax,(%esp)\n\t"
2827 "jnle .Lle_fallthru\n\t"
2828 ".Lle_jump:\n\t"
2829 "lea 0x8(%esp),%esp\n\t"
2830 "pop %eax\n\t"
2831 "pop %ebx\n\t"
2832 /* jmp, but don't trust the assembler to choose the right jump */
2833 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2834 ".Lle_fallthru:\n\t"
2835 "lea 0x8(%esp),%esp\n\t"
2836 "pop %eax\n\t"
2837 "pop %ebx");
2838
2839 if (offset_p)
2840 *offset_p = 20;
2841 if (size_p)
2842 *size_p = 4;
2843}
2844
2845static void
2846i386_emit_gt_goto (int *offset_p, int *size_p)
2847{
2848 EMIT_ASM32 (gt,
2849 "cmpl %ebx,4(%esp)\n\t"
2850 "jg .Lgt_jump\n\t"
2851 "jne .Lgt_fallthru\n\t"
2852 "cmpl %eax,(%esp)\n\t"
2853 "jng .Lgt_fallthru\n\t"
2854 ".Lgt_jump:\n\t"
2855 "lea 0x8(%esp),%esp\n\t"
2856 "pop %eax\n\t"
2857 "pop %ebx\n\t"
2858 /* jmp, but don't trust the assembler to choose the right jump */
2859 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2860 ".Lgt_fallthru:\n\t"
2861 "lea 0x8(%esp),%esp\n\t"
2862 "pop %eax\n\t"
2863 "pop %ebx");
2864
2865 if (offset_p)
2866 *offset_p = 20;
2867 if (size_p)
2868 *size_p = 4;
2869}
2870
2871static void
2872i386_emit_ge_goto (int *offset_p, int *size_p)
2873{
2874 EMIT_ASM32 (ge,
2875 "cmpl %ebx,4(%esp)\n\t"
2876 "jge .Lge_jump\n\t"
2877 "jne .Lge_fallthru\n\t"
2878 "cmpl %eax,(%esp)\n\t"
2879 "jnge .Lge_fallthru\n\t"
2880 ".Lge_jump:\n\t"
2881 "lea 0x8(%esp),%esp\n\t"
2882 "pop %eax\n\t"
2883 "pop %ebx\n\t"
2884 /* jmp, but don't trust the assembler to choose the right jump */
2885 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2886 ".Lge_fallthru:\n\t"
2887 "lea 0x8(%esp),%esp\n\t"
2888 "pop %eax\n\t"
2889 "pop %ebx");
2890
2891 if (offset_p)
2892 *offset_p = 20;
2893 if (size_p)
2894 *size_p = 4;
2895}
2896
2897struct emit_ops i386_emit_ops =
2898 {
2899 i386_emit_prologue,
2900 i386_emit_epilogue,
2901 i386_emit_add,
2902 i386_emit_sub,
2903 i386_emit_mul,
2904 i386_emit_lsh,
2905 i386_emit_rsh_signed,
2906 i386_emit_rsh_unsigned,
2907 i386_emit_ext,
2908 i386_emit_log_not,
2909 i386_emit_bit_and,
2910 i386_emit_bit_or,
2911 i386_emit_bit_xor,
2912 i386_emit_bit_not,
2913 i386_emit_equal,
2914 i386_emit_less_signed,
2915 i386_emit_less_unsigned,
2916 i386_emit_ref,
2917 i386_emit_if_goto,
2918 i386_emit_goto,
2919 i386_write_goto_address,
2920 i386_emit_const,
2921 i386_emit_call,
2922 i386_emit_reg,
2923 i386_emit_pop,
2924 i386_emit_stack_flush,
2925 i386_emit_zero_ext,
2926 i386_emit_swap,
2927 i386_emit_stack_adjust,
2928 i386_emit_int_call_1,
2929 i386_emit_void_call_2,
2930 i386_emit_eq_goto,
2931 i386_emit_ne_goto,
2932 i386_emit_lt_goto,
2933 i386_emit_le_goto,
2934 i386_emit_gt_goto,
2935 i386_emit_ge_goto
2936 };
2937
2938
2939emit_ops *
2940x86_target::emit_ops ()
2941{
2942#ifdef __x86_64__
2943 if (is_64bit_tdesc ())
2944 return &amd64_emit_ops;
2945 else
2946#endif
2947 return &i386_emit_ops;
2948}
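/* Illustrative only -- the real caller is the agent-expression
   bytecode compiler: to compile something like "reg A + reg B" it
   would invoke roughly

     ops->emit_prologue ();
     ops->emit_reg (A);          // top-of-stack = register A
     ops->emit_stack_flush ();   // spill it to the stack
     ops->emit_reg (B);          // top-of-stack = register B
     ops->emit_add ();           // pop, add: top = A + B
     ops->emit_epilogue ();      // store result, return

   where A and B stand for hypothetical raw register numbers.  */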
2949
2950/* Implementation of target ops method "sw_breakpoint_from_kind". */
2951
2952const gdb_byte *
2953x86_target::sw_breakpoint_from_kind (int kind, int *size)
2954{
2955 *size = x86_breakpoint_len;
2956 return x86_breakpoint;
2957}
2958
2959static int
2960x86_supports_range_stepping (void)
2961{
2962 return 1;
2963}
2964
2965/* Implementation of linux_target_ops method "supports_hardware_single_step".
2966 */
2967
2968static int
2969x86_supports_hardware_single_step (void)
2970{
2971 return 1;
2972}
2973
2974static int
2975x86_get_ipa_tdesc_idx (void)
2976{
2977 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2978 const struct target_desc *tdesc = regcache->tdesc;
2979
2980#ifdef __x86_64__
2981 return amd64_get_ipa_tdesc_idx (tdesc);
2982#endif
2983
2984 if (tdesc == tdesc_i386_linux_no_xml)
2985 return X86_TDESC_SSE;
2986
2987 return i386_get_ipa_tdesc_idx (tdesc);
2988}
2989
2990/* This is initialized assuming an amd64 target.
2991 x86_arch_setup will correct it for i386 or amd64 targets. */
2992
2993struct linux_target_ops the_low_target =
2994{
2995 x86_supports_range_stepping,
2996 x86_supports_hardware_single_step,
2997 x86_get_syscall_trapinfo,
2998 x86_get_ipa_tdesc_idx,
2999};
3000
3001/* The linux target ops object. */
3002
3003linux_process_target *the_linux_target = &the_x86_target;
3004
3005void
3006initialize_low_arch (void)
3007{
3008 /* Initialize the Linux target descriptions. */
3009#ifdef __x86_64__
3010 tdesc_amd64_linux_no_xml = allocate_target_description ();
3011 copy_target_description (tdesc_amd64_linux_no_xml,
3012 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
3013 false));
3014 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3015#endif
3016
3017 tdesc_i386_linux_no_xml = allocate_target_description ();
3018 copy_target_description (tdesc_i386_linux_no_xml,
3019 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3020 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3021
3022 initialize_regsets_info (&x86_regsets_info);
3023}