gdbserver/linux-low: turn fast tracepoint ops into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
/* Fallback target descriptions used when the connected GDB does not
   support XML target descriptions (see use_xml below).  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;
54
1570b33e 55
/* Templates for the jump instructions installed at fast tracepoint
   sites: a 5-byte near jmp (rel32) and a 4-byte jmp with operand-size
   prefix (rel16).  Displacement bytes are patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
/* Fallback definitions for ptrace requests and arch_prctl codes that
   older kernel headers may not provide.  */

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
94
ef0478f6
TBA
95/* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99class x86_target : public linux_process_target
100{
101public:
102
aa8d21c9
TBA
103 const regs_info *get_regs_info () override;
104
3ca4edb6
TBA
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
007c9b97
TBA
107 bool supports_z_point_type (char z_type) override;
108
a5b5da92
TBA
109 void process_qsupported (char **features, int count) override;
110
47f70aa7
TBA
111 bool supports_tracepoints () override;
112
809a0c35
TBA
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
797bcff5
TBA
125protected:
126
127 void low_arch_setup () override;
daca57a7
TBA
128
129 bool low_cannot_fetch_register (int regno) override;
130
131 bool low_cannot_store_register (int regno) override;
bf9ae9d8
TBA
132
133 bool low_supports_breakpoints () override;
134
135 CORE_ADDR low_get_pc (regcache *regcache) override;
136
137 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
d4807ea2
TBA
138
139 int low_decr_pc_after_break () override;
d7146cda
TBA
140
141 bool low_breakpoint_at (CORE_ADDR pc) override;
9db9aa23
TBA
142
143 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
144 int size, raw_breakpoint *bp) override;
145
146 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
147 int size, raw_breakpoint *bp) override;
ac1bbaca
TBA
148
149 bool low_stopped_by_watchpoint () override;
150
151 CORE_ADDR low_stopped_data_address () override;
b35db733
TBA
152
153 /* collect_ptrace_register/supply_ptrace_register are not needed in the
154 native i386 case (no registers smaller than an xfer unit), and are not
155 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
cb63de7c
TBA
156
157 /* Need to fix up i386 siginfo if host is amd64. */
158 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
159 int direction) override;
fd000fb3
TBA
160
161 arch_process_info *low_new_process () override;
162
163 void low_delete_process (arch_process_info *info) override;
164
165 void low_new_thread (lwp_info *) override;
166
167 void low_delete_thread (arch_lwp_info *) override;
168
169 void low_new_fork (process_info *parent, process_info *child) override;
d7599cc0
TBA
170
171 void low_prepare_to_resume (lwp_info *lwp) override;
a5b5da92 172
13e567af
TBA
173 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
174
a5b5da92
TBA
175private:
176
177 /* Update all the target description of all processes; a new GDB
178 connected, and it may or not support xml target descriptions. */
179 void update_xmltarget ();
ef0478f6
TBA
180};
181
182/* The singleton target ops object. */
183
184static x86_target the_x86_target;
185
aa5ca48f
DE
186/* Per-process arch-specific data we want to keep. */
187
188struct arch_process_info
189{
df7e5265 190 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
191};
192
d0722149
DE
193#ifdef __x86_64__
194
195/* Mapping between the general-purpose registers in `struct user'
196 format and GDB's register array layout.
197 Note that the transfer layout uses 64-bit regs. */
198static /*const*/ int i386_regmap[] =
199{
200 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
201 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
202 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
203 DS * 8, ES * 8, FS * 8, GS * 8
204};
205
206#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
207
208/* So code below doesn't have to care, i386 or amd64. */
209#define ORIG_EAX ORIG_RAX
bc9540e8 210#define REGSIZE 8
d0722149
DE
211
212static const int x86_64_regmap[] =
213{
214 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
215 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
216 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
217 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
218 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
219 DS * 8, ES * 8, FS * 8, GS * 8,
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1,
222 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
223 -1,
224 -1, -1, -1, -1, -1, -1, -1, -1,
225 ORIG_RAX * 8,
2735833d
WT
226#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
227 21 * 8, 22 * 8,
228#else
229 -1, -1,
230#endif
a196ebeb 231 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
232 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
233 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
236 -1, -1, -1, -1, -1, -1, -1, -1,
237 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
238 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
239 -1, -1, -1, -1, -1, -1, -1, -1,
240 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
241 -1, -1, -1, -1, -1, -1, -1, -1,
242 -1 /* pkru */
d0722149
DE
243};
244
245#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 246#define X86_64_USER_REGS (GS + 1)
d0722149
DE
247
248#else /* ! __x86_64__ */
249
250/* Mapping between the general-purpose registers in `struct user'
251 format and GDB's register array layout. */
252static /*const*/ int i386_regmap[] =
253{
254 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
255 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
256 EIP * 4, EFL * 4, CS * 4, SS * 4,
257 DS * 4, ES * 4, FS * 4, GS * 4
258};
259
260#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
261
bc9540e8
PA
262#define REGSIZE 4
263
d0722149 264#endif
3aee8918
PA
265
266#ifdef __x86_64__
267
268/* Returns true if the current inferior belongs to a x86-64 process,
269 per the tdesc. */
270
271static int
272is_64bit_tdesc (void)
273{
0bfdf32f 274 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
275
276 return register_size (regcache->tdesc, 0) == 8;
277}
278
279#endif
280
d0722149
DE
281\f
282/* Called by libthread_db. */
283
284ps_err_e
754653a7 285ps_get_thread_area (struct ps_prochandle *ph,
d0722149
DE
286 lwpid_t lwpid, int idx, void **base)
287{
288#ifdef __x86_64__
3aee8918 289 int use_64bit = is_64bit_tdesc ();
d0722149
DE
290
291 if (use_64bit)
292 {
293 switch (idx)
294 {
295 case FS:
296 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
297 return PS_OK;
298 break;
299 case GS:
300 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
301 return PS_OK;
302 break;
303 default:
304 return PS_BADADDR;
305 }
306 return PS_ERR;
307 }
308#endif
309
310 {
311 unsigned int desc[4];
312
313 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
314 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
315 return PS_ERR;
316
d1ec4ce7
DE
317 /* Ensure we properly extend the value to 64-bits for x86_64. */
318 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
319 return PS_OK;
320 }
321}
fa593d66
PA
322
323/* Get the thread area address. This is used to recognize which
324 thread is which when tracing with the in-process agent library. We
325 don't read anything from the address, and treat it as opaque; it's
326 the address itself that we assume is unique per-thread. */
327
13e567af
TBA
328int
329x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
fa593d66
PA
330{
331#ifdef __x86_64__
3aee8918 332 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
333
334 if (use_64bit)
335 {
336 void *base;
337 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
338 {
339 *addr = (CORE_ADDR) (uintptr_t) base;
340 return 0;
341 }
342
343 return -1;
344 }
345#endif
346
347 {
f2907e49 348 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
d86d4aaf
DE
349 struct thread_info *thr = get_lwp_thread (lwp);
350 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
351 unsigned int desc[4];
352 ULONGEST gs = 0;
353 const int reg_thread_area = 3; /* bits to scale down register value. */
354 int idx;
355
356 collect_register_by_name (regcache, "gs", &gs);
357
358 idx = gs >> reg_thread_area;
359
360 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 361 lwpid_of (thr),
493e2a69 362 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
363 return -1;
364
365 *addr = desc[1];
366 return 0;
367 }
368}
369
370
d0722149 371\f
daca57a7
TBA
372bool
373x86_target::low_cannot_store_register (int regno)
d0722149 374{
3aee8918
PA
375#ifdef __x86_64__
376 if (is_64bit_tdesc ())
daca57a7 377 return false;
3aee8918
PA
378#endif
379
d0722149
DE
380 return regno >= I386_NUM_REGS;
381}
382
daca57a7
TBA
383bool
384x86_target::low_cannot_fetch_register (int regno)
d0722149 385{
3aee8918
PA
386#ifdef __x86_64__
387 if (is_64bit_tdesc ())
daca57a7 388 return false;
3aee8918
PA
389#endif
390
d0722149
DE
391 return regno >= I386_NUM_REGS;
392}
393
394static void
442ea881 395x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
396{
397 int i;
398
399#ifdef __x86_64__
3aee8918 400 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
401 {
402 for (i = 0; i < X86_64_NUM_REGS; i++)
403 if (x86_64_regmap[i] != -1)
442ea881 404 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
2735833d
WT
405
406#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
407 {
408 unsigned long base;
409 int lwpid = lwpid_of (current_thread);
410
411 collect_register_by_name (regcache, "fs_base", &base);
412 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
413
414 collect_register_by_name (regcache, "gs_base", &base);
415 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
416 }
417#endif
418
d0722149
DE
419 return;
420 }
9e0aa64f
JK
421
422 /* 32-bit inferior registers need to be zero-extended.
423 Callers would read uninitialized memory otherwise. */
424 memset (buf, 0x00, X86_64_USER_REGS * 8);
d0722149
DE
425#endif
426
427 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 428 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 429
442ea881 430 collect_register_by_name (regcache, "orig_eax",
bc9540e8 431 ((char *) buf) + ORIG_EAX * REGSIZE);
3f52fdbc 432
e90a813d 433#ifdef __x86_64__
3f52fdbc
KB
434 /* Sign extend EAX value to avoid potential syscall restart
435 problems.
436
437 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
438 for a detailed explanation. */
439 if (register_size (regcache->tdesc, 0) == 4)
440 {
441 void *ptr = ((gdb_byte *) buf
442 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
443
444 *(int64_t *) ptr = *(int32_t *) ptr;
445 }
e90a813d 446#endif
d0722149
DE
447}
448
449static void
442ea881 450x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
451{
452 int i;
453
454#ifdef __x86_64__
3aee8918 455 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
456 {
457 for (i = 0; i < X86_64_NUM_REGS; i++)
458 if (x86_64_regmap[i] != -1)
442ea881 459 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
2735833d
WT
460
461#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
462 {
463 unsigned long base;
464 int lwpid = lwpid_of (current_thread);
465
466 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
467 supply_register_by_name (regcache, "fs_base", &base);
468
469 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
470 supply_register_by_name (regcache, "gs_base", &base);
471 }
472#endif
d0722149
DE
473 return;
474 }
475#endif
476
477 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 478 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 479
442ea881 480 supply_register_by_name (regcache, "orig_eax",
bc9540e8 481 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
482}
483
484static void
442ea881 485x86_fill_fpregset (struct regcache *regcache, void *buf)
d0722149
DE
486{
487#ifdef __x86_64__
442ea881 488 i387_cache_to_fxsave (regcache, buf);
d0722149 489#else
442ea881 490 i387_cache_to_fsave (regcache, buf);
d0722149
DE
491#endif
492}
493
494static void
442ea881 495x86_store_fpregset (struct regcache *regcache, const void *buf)
d0722149
DE
496{
497#ifdef __x86_64__
442ea881 498 i387_fxsave_to_cache (regcache, buf);
d0722149 499#else
442ea881 500 i387_fsave_to_cache (regcache, buf);
d0722149
DE
501#endif
502}
503
#ifndef __x86_64__

/* i386 only: convert between the regcache and the PTRACE_GETFPXREGS
   (fxsave-format) buffer.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
519
1570b33e
L
520static void
521x86_fill_xstateregset (struct regcache *regcache, void *buf)
522{
523 i387_cache_to_xsave (regcache, buf);
524}
525
526static void
527x86_store_xstateregset (struct regcache *regcache, const void *buf)
528{
529 i387_xsave_to_cache (regcache, buf);
530}
531
d0722149
DE
532/* ??? The non-biarch i386 case stores all the i387 regs twice.
533 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
534 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
535 doesn't work. IWBN to avoid the duplication in the case where it
536 does work. Maybe the arch_setup routine could check whether it works
3aee8918 537 and update the supported regsets accordingly. */
d0722149 538
3aee8918 539static struct regset_info x86_regsets[] =
d0722149
DE
540{
541#ifdef HAVE_PTRACE_GETREGS
1570b33e 542 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
543 GENERAL_REGS,
544 x86_fill_gregset, x86_store_gregset },
1570b33e
L
545 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
546 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
547# ifndef __x86_64__
548# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 549 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
550 EXTENDED_REGS,
551 x86_fill_fpxregset, x86_store_fpxregset },
552# endif
553# endif
1570b33e 554 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
555 FP_REGS,
556 x86_fill_fpregset, x86_store_fpregset },
557#endif /* HAVE_PTRACE_GETREGS */
50bc912a 558 NULL_REGSET
d0722149
DE
559};
560
bf9ae9d8
TBA
561bool
562x86_target::low_supports_breakpoints ()
563{
564 return true;
565}
566
567CORE_ADDR
568x86_target::low_get_pc (regcache *regcache)
d0722149 569{
3aee8918 570 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
571
572 if (use_64bit)
573 {
6598661d
PA
574 uint64_t pc;
575
442ea881 576 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
577 return (CORE_ADDR) pc;
578 }
579 else
580 {
6598661d
PA
581 uint32_t pc;
582
442ea881 583 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
584 return (CORE_ADDR) pc;
585 }
586}
587
bf9ae9d8
TBA
588void
589x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 590{
3aee8918 591 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
592
593 if (use_64bit)
594 {
6598661d
PA
595 uint64_t newpc = pc;
596
442ea881 597 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
598 }
599 else
600 {
6598661d
PA
601 uint32_t newpc = pc;
602
442ea881 603 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
604 }
605}
d4807ea2
TBA
606
607int
608x86_target::low_decr_pc_after_break ()
609{
610 return 1;
611}
612
d0722149 613\f
dd373349 614static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
615#define x86_breakpoint_len 1
616
d7146cda
TBA
617bool
618x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
619{
620 unsigned char c;
621
d7146cda 622 read_memory (pc, &c, 1);
d0722149 623 if (c == 0xCC)
d7146cda 624 return true;
d0722149 625
d7146cda 626 return false;
d0722149
DE
627}
628\f
42995dbd 629/* Low-level function vector. */
df7e5265 630struct x86_dr_low_type x86_dr_low =
42995dbd 631 {
d33472ad
GB
632 x86_linux_dr_set_control,
633 x86_linux_dr_set_addr,
634 x86_linux_dr_get_addr,
635 x86_linux_dr_get_status,
636 x86_linux_dr_get_control,
42995dbd
GB
637 sizeof (void *),
638 };
aa5ca48f 639\f
90d74c30 640/* Breakpoint/Watchpoint support. */
aa5ca48f 641
007c9b97
TBA
642bool
643x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
644{
645 switch (z_type)
646 {
647 case Z_PACKET_SW_BP:
648 case Z_PACKET_HW_BP:
649 case Z_PACKET_WRITE_WP:
650 case Z_PACKET_ACCESS_WP:
007c9b97 651 return true;
802e8e6d 652 default:
007c9b97 653 return false;
802e8e6d
PA
654 }
655}
656
9db9aa23
TBA
657int
658x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
659 int size, raw_breakpoint *bp)
aa5ca48f
DE
660{
661 struct process_info *proc = current_process ();
802e8e6d 662
aa5ca48f
DE
663 switch (type)
664 {
802e8e6d
PA
665 case raw_bkpt_type_hw:
666 case raw_bkpt_type_write_wp:
667 case raw_bkpt_type_access_wp:
a4165e94 668 {
802e8e6d
PA
669 enum target_hw_bp_type hw_type
670 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 671 struct x86_debug_reg_state *state
fe978cb0 672 = &proc->priv->arch_private->debug_reg_state;
a4165e94 673
df7e5265 674 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 675 }
961bd387 676
aa5ca48f
DE
677 default:
678 /* Unsupported. */
679 return 1;
680 }
681}
682
9db9aa23
TBA
683int
684x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
685 int size, raw_breakpoint *bp)
aa5ca48f
DE
686{
687 struct process_info *proc = current_process ();
802e8e6d 688
aa5ca48f
DE
689 switch (type)
690 {
802e8e6d
PA
691 case raw_bkpt_type_hw:
692 case raw_bkpt_type_write_wp:
693 case raw_bkpt_type_access_wp:
a4165e94 694 {
802e8e6d
PA
695 enum target_hw_bp_type hw_type
696 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 697 struct x86_debug_reg_state *state
fe978cb0 698 = &proc->priv->arch_private->debug_reg_state;
a4165e94 699
df7e5265 700 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 701 }
aa5ca48f
DE
702 default:
703 /* Unsupported. */
704 return 1;
705 }
706}
707
ac1bbaca
TBA
708bool
709x86_target::low_stopped_by_watchpoint ()
aa5ca48f
DE
710{
711 struct process_info *proc = current_process ();
fe978cb0 712 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
713}
714
ac1bbaca
TBA
715CORE_ADDR
716x86_target::low_stopped_data_address ()
aa5ca48f
DE
717{
718 struct process_info *proc = current_process ();
719 CORE_ADDR addr;
fe978cb0 720 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 721 &addr))
aa5ca48f
DE
722 return addr;
723 return 0;
724}
725\f
726/* Called when a new process is created. */
727
fd000fb3
TBA
728arch_process_info *
729x86_target::low_new_process ()
aa5ca48f 730{
ed859da7 731 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 732
df7e5265 733 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
734
735 return info;
736}
737
04ec7890
SM
738/* Called when a process is being deleted. */
739
fd000fb3
TBA
740void
741x86_target::low_delete_process (arch_process_info *info)
04ec7890
SM
742{
743 xfree (info);
744}
745
fd000fb3
TBA
746void
747x86_target::low_new_thread (lwp_info *lwp)
748{
749 /* This comes from nat/. */
750 x86_linux_new_thread (lwp);
751}
3a8a0396 752
fd000fb3
TBA
753void
754x86_target::low_delete_thread (arch_lwp_info *alwp)
755{
756 /* This comes from nat/. */
757 x86_linux_delete_thread (alwp);
758}
759
760/* Target routine for new_fork. */
761
762void
763x86_target::low_new_fork (process_info *parent, process_info *child)
3a8a0396
DB
764{
765 /* These are allocated by linux_add_process. */
766 gdb_assert (parent->priv != NULL
767 && parent->priv->arch_private != NULL);
768 gdb_assert (child->priv != NULL
769 && child->priv->arch_private != NULL);
770
771 /* Linux kernel before 2.6.33 commit
772 72f674d203cd230426437cdcf7dd6f681dad8b0d
773 will inherit hardware debug registers from parent
774 on fork/vfork/clone. Newer Linux kernels create such tasks with
775 zeroed debug registers.
776
777 GDB core assumes the child inherits the watchpoints/hw
778 breakpoints of the parent, and will remove them all from the
779 forked off process. Copy the debug registers mirrors into the
780 new process so that all breakpoints and watchpoints can be
781 removed together. The debug registers mirror will become zeroed
782 in the end before detaching the forked off process, thus making
783 this compatible with older Linux kernels too. */
784
785 *child->priv->arch_private = *parent->priv->arch_private;
786}
787
d7599cc0
TBA
788void
789x86_target::low_prepare_to_resume (lwp_info *lwp)
790{
791 /* This comes from nat/. */
792 x86_linux_prepare_to_resume (lwp);
793}
794
70a0bb6b
GB
795/* See nat/x86-dregs.h. */
796
797struct x86_debug_reg_state *
798x86_debug_reg_state (pid_t pid)
799{
800 struct process_info *proc = find_process_pid (pid);
801
802 return &proc->priv->arch_private->debug_reg_state;
803}
aa5ca48f 804\f
d0722149
DE
805/* When GDBSERVER is built as a 64-bit application on linux, the
806 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
807 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
808 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
809 conversion in-place ourselves. */
810
9cf12d57 811/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
812 layout of the inferiors' architecture. Returns true if any
813 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 814 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
815 INF. */
816
cb63de7c
TBA
817bool
818x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
d0722149
DE
819{
820#ifdef __x86_64__
760256f9 821 unsigned int machine;
0bfdf32f 822 int tid = lwpid_of (current_thread);
760256f9
PA
823 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
824
d0722149 825 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 826 if (!is_64bit_tdesc ())
9cf12d57 827 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 828 FIXUP_32);
c92b5177 829 /* No fixup for native x32 GDB. */
760256f9 830 else if (!is_elf64 && sizeof (void *) == 8)
9cf12d57 831 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 832 FIXUP_X32);
d0722149
DE
833#endif
834
cb63de7c 835 return false;
d0722149
DE
836}
837\f
1570b33e
L
/* Nonzero if the connected GDB understands XML target descriptions
   (set from the qSupported "xmlRegisters=" feature).  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
1570b33e 871
3aee8918
PA
872/* Get Linux/x86 target description from running target. */
873
874static const struct target_desc *
875x86_linux_read_description (void)
1570b33e 876{
3aee8918
PA
877 unsigned int machine;
878 int is_elf64;
a196ebeb 879 int xcr0_features;
3aee8918
PA
880 int tid;
881 static uint64_t xcr0;
3a13a53b 882 struct regset_info *regset;
1570b33e 883
0bfdf32f 884 tid = lwpid_of (current_thread);
1570b33e 885
3aee8918 886 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 887
3aee8918 888 if (sizeof (void *) == 4)
3a13a53b 889 {
3aee8918
PA
890 if (is_elf64 > 0)
891 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
892#ifndef __x86_64__
893 else if (machine == EM_X86_64)
894 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
895#endif
896 }
3a13a53b 897
3aee8918
PA
898#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
899 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
900 {
901 elf_fpxregset_t fpxregs;
3a13a53b 902
3aee8918 903 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 904 {
3aee8918
PA
905 have_ptrace_getfpxregs = 0;
906 have_ptrace_getregset = 0;
f49ff000 907 return i386_linux_read_description (X86_XSTATE_X87);
3a13a53b 908 }
3aee8918
PA
909 else
910 have_ptrace_getfpxregs = 1;
3a13a53b 911 }
1570b33e
L
912#endif
913
914 if (!use_xml)
915 {
df7e5265 916 x86_xcr0 = X86_XSTATE_SSE_MASK;
3aee8918 917
1570b33e
L
918 /* Don't use XML. */
919#ifdef __x86_64__
3aee8918
PA
920 if (machine == EM_X86_64)
921 return tdesc_amd64_linux_no_xml;
1570b33e 922 else
1570b33e 923#endif
3aee8918 924 return tdesc_i386_linux_no_xml;
1570b33e
L
925 }
926
1570b33e
L
927 if (have_ptrace_getregset == -1)
928 {
df7e5265 929 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 930 struct iovec iov;
1570b33e
L
931
932 iov.iov_base = xstateregs;
933 iov.iov_len = sizeof (xstateregs);
934
935 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
936 if (ptrace (PTRACE_GETREGSET, tid,
937 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
938 have_ptrace_getregset = 0;
939 else
1570b33e 940 {
3aee8918
PA
941 have_ptrace_getregset = 1;
942
943 /* Get XCR0 from XSAVE extended state. */
944 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
945 / sizeof (uint64_t))];
946
947 /* Use PTRACE_GETREGSET if it is available. */
948 for (regset = x86_regsets;
949 regset->fill_function != NULL; regset++)
950 if (regset->get_request == PTRACE_GETREGSET)
df7e5265 951 regset->size = X86_XSTATE_SIZE (xcr0);
3aee8918
PA
952 else if (regset->type != GENERAL_REGS)
953 regset->size = 0;
1570b33e 954 }
1570b33e
L
955 }
956
3aee8918 957 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb 958 xcr0_features = (have_ptrace_getregset
2e1e43e1 959 && (xcr0 & X86_XSTATE_ALL_MASK));
3aee8918 960
a196ebeb 961 if (xcr0_features)
3aee8918 962 x86_xcr0 = xcr0;
1570b33e 963
3aee8918
PA
964 if (machine == EM_X86_64)
965 {
1570b33e 966#ifdef __x86_64__
b4570e4b 967 const target_desc *tdesc = NULL;
a196ebeb 968
b4570e4b 969 if (xcr0_features)
3aee8918 970 {
b4570e4b
YQ
971 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
972 !is_elf64);
1570b33e 973 }
b4570e4b
YQ
974
975 if (tdesc == NULL)
976 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
977 return tdesc;
3aee8918 978#endif
1570b33e 979 }
3aee8918
PA
980 else
981 {
f49ff000 982 const target_desc *tdesc = NULL;
a1fa17ee 983
f49ff000
YQ
984 if (xcr0_features)
985 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
2b863f51 986
f49ff000
YQ
987 if (tdesc == NULL)
988 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
a196ebeb 989
f49ff000 990 return tdesc;
3aee8918
PA
991 }
992
993 gdb_assert_not_reached ("failed to return tdesc");
994}
995
3aee8918
PA
996/* Update all the target description of all processes; a new GDB
997 connected, and it may or not support xml target descriptions. */
998
797bcff5
TBA
999void
1000x86_target::update_xmltarget ()
3aee8918 1001{
0bfdf32f 1002 struct thread_info *saved_thread = current_thread;
3aee8918
PA
1003
1004 /* Before changing the register cache's internal layout, flush the
1005 contents of the current valid caches back to the threads, and
1006 release the current regcache objects. */
1007 regcache_release ();
1008
797bcff5 1009 for_each_process ([this] (process_info *proc) {
9179355e
SM
1010 int pid = proc->pid;
1011
1012 /* Look up any thread of this process. */
1013 current_thread = find_any_thread_of_pid (pid);
1014
797bcff5 1015 low_arch_setup ();
9179355e 1016 });
3aee8918 1017
0bfdf32f 1018 current_thread = saved_thread;
1570b33e
L
1019}
1020
1021/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1022 PTRACE_GETREGSET. */
1023
a5b5da92
TBA
1024void
1025x86_target::process_qsupported (char **features, int count)
1570b33e 1026{
06e03fff
PA
1027 int i;
1028
1570b33e
L
1029 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1030 with "i386" in qSupported query, it supports x86 XML target
1031 descriptions. */
1032 use_xml = 0;
06e03fff 1033 for (i = 0; i < count; i++)
1570b33e 1034 {
06e03fff 1035 const char *feature = features[i];
1570b33e 1036
06e03fff 1037 if (startswith (feature, "xmlRegisters="))
1570b33e 1038 {
06e03fff 1039 char *copy = xstrdup (feature + 13);
06e03fff 1040
ca3a04f6
CB
1041 char *saveptr;
1042 for (char *p = strtok_r (copy, ",", &saveptr);
1043 p != NULL;
1044 p = strtok_r (NULL, ",", &saveptr))
1570b33e 1045 {
06e03fff
PA
1046 if (strcmp (p, "i386") == 0)
1047 {
1048 use_xml = 1;
1049 break;
1050 }
1570b33e 1051 }
1570b33e 1052
06e03fff
PA
1053 free (copy);
1054 }
1570b33e 1055 }
a5b5da92 1056 update_xmltarget ();
1570b33e
L
1057}
1058
3aee8918 1059/* Common for x86/x86-64. */
d0722149 1060
3aee8918
PA
1061static struct regsets_info x86_regsets_info =
1062 {
1063 x86_regsets, /* regsets */
1064 0, /* num_regsets */
1065 NULL, /* disabled_regsets */
1066 };
214d508e
L
1067
1068#ifdef __x86_64__
3aee8918
PA
1069static struct regs_info amd64_linux_regs_info =
1070 {
1071 NULL, /* regset_bitmap */
1072 NULL, /* usrregs_info */
1073 &x86_regsets_info
1074 };
d0722149 1075#endif
3aee8918
PA
1076static struct usrregs_info i386_linux_usrregs_info =
1077 {
1078 I386_NUM_REGS,
1079 i386_regmap,
1080 };
d0722149 1081
3aee8918
PA
1082static struct regs_info i386_linux_regs_info =
1083 {
1084 NULL, /* regset_bitmap */
1085 &i386_linux_usrregs_info,
1086 &x86_regsets_info
1087 };
d0722149 1088
aa8d21c9
TBA
1089const regs_info *
1090x86_target::get_regs_info ()
3aee8918
PA
1091{
1092#ifdef __x86_64__
1093 if (is_64bit_tdesc ())
1094 return &amd64_linux_regs_info;
1095 else
1096#endif
1097 return &i386_linux_regs_info;
1098}
d0722149 1099
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  /* Probe the inferior and cache the matching target description on
     the current process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1108
82075af2
JS
1109/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1110 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1111
1112static void
4cc32bec 1113x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
82075af2
JS
1114{
1115 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1116
1117 if (use_64bit)
1118 {
1119 long l_sysno;
82075af2
JS
1120
1121 collect_register_by_name (regcache, "orig_rax", &l_sysno);
82075af2 1122 *sysno = (int) l_sysno;
82075af2
JS
1123 }
1124 else
4cc32bec 1125 collect_register_by_name (regcache, "orig_eax", sysno);
82075af2
JS
1126}
1127
47f70aa7
TBA
/* Tracepoints are supported on both i386 and x86-64 GNU/Linux.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
1133
fa593d66
PA
/* Write the LEN bytes of BUF into the inferior at *TO, then advance
   *TO past them.  Used to emit instruction sequences into a jump pad
   one chunk at a time.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1140
/* Decode OP, a string of whitespace-separated hexadecimal byte
   values, into BUF.  Returns the number of bytes stored.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *out = buf;

  for (;;)
    {
      char *endptr;
      unsigned long byte = strtoul (op, &endptr, 16);

      /* strtoul consumed nothing: the string is exhausted.  */
      if (endptr == op)
	break;

      *out++ = byte;
      op = endptr;
    }

  return out - buf;
}
1160
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the tracepoint object's address (passed to the collector);
   TPADDR is the address the tracepoint is set at; COLLECTOR is the
   address of the collection function in the shared library (the
   in-process agent); LOCKADDR is the address of the spin lock.
   On success returns 0, sets *JUMP_ENTRY to the end of the pad and
   *ADJUSTED_INSN_ADDR/*ADJUSTED_INSN_ADDR_END to where the original
   instruction was relocated.  On failure returns 1 with an "E."
   message in ERR.  NOTE(review): jump_insn is a file-level 5-byte
   jmp-rel32 template defined earlier in this file.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The saved PC (the tracepoint address) goes on top of the register
     block.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1351
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   i386 flavor; see amd64_install_fast_tracepoint_jump_pad for the
   meaning of the parameters.  When the instruction being traced is
   only 4 bytes long, a 4-byte jump to a trampoline is emitted instead
   of the usual 5-byte jump (*TRAMPOLINE/*TRAMPOLINE_SIZE are set in
   that case).  NOTE(review): jump_insn (5-byte jmp rel32) and
   small_jump_insn (4-byte jmp rel16) are file-level templates defined
   earlier in this file.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* The call instruction below is 5 bytes, the same length as
     jump_insn, hence the sizeof.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the pushes above in reverse.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1539
809a0c35
TBA
/* Fast tracepoints (jump-pad based) are available on this target.  */

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}
1545
/* Install a fast tracepoint jump pad.  Dispatches to the amd64 or
   i386 builder depending on the bitness of the current inferior's
   target description; see those functions for parameter details.  */

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1584
1585/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1586 architectures. */
1587
809a0c35
TBA
1588int
1589x86_target::get_min_fast_tracepoint_insn_len ()
405f8e94
SS
1590{
1591 static int warned_about_fast_tracepoints = 0;
1592
1593#ifdef __x86_64__
1594 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1595 used for fast tracepoints. */
3aee8918 1596 if (is_64bit_tdesc ())
405f8e94
SS
1597 return 5;
1598#endif
1599
58b4daa5 1600 if (agent_loaded_p ())
405f8e94
SS
1601 {
1602 char errbuf[IPA_BUFSIZ];
1603
1604 errbuf[0] = '\0';
1605
1606 /* On x86, if trampolines are available, then 4-byte jump instructions
1607 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1608 with a 4-byte offset are used instead. */
1609 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1610 return 4;
1611 else
1612 {
1613 /* GDB has no channel to explain to user why a shorter fast
1614 tracepoint is not possible, but at least make GDBserver
1615 mention that something has gone awry. */
1616 if (!warned_about_fast_tracepoints)
1617 {
422186a9 1618 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1619 warned_about_fast_tracepoints = 1;
1620 }
1621 return 5;
1622 }
1623 }
1624 else
1625 {
1626 /* Indicate that the minimum length is currently unknown since the IPA
1627 has not loaded yet. */
1628 return 0;
1629 }
fa593d66
PA
1630}
1631
6a271cae
PA
/* Append the LEN bytes at START to the bytecode-compilation area,
   advancing the global CURRENT_INSN_PTR past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1644
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* EMIT_ASM assembles INSNS in place, bracketed by the start_NAME /
   end_NAME labels, and copies those bytes into the compiled-bytecode
   area with add_insns.  The leading jmp skips over the template so it
   is never executed here.  NAME must be unique per use since the
   labels have external linkage.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assembles INSNS as 32-bit code (.code32) when
   building a 64-bit gdbserver that targets a 32-bit inferior.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a 32-bit build, 32-bit emission is just EMIT_ASM.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1682
#ifdef __x86_64__

/* Emit the compiled-expression prologue: set up a frame, reserve
   scratch space, and stash the two incoming arguments.  NOTE(review):
   %rdi appears to be the raw-register block (see amd64_emit_reg) and
   %rsi the result slot read back by amd64_emit_epilogue — confirm
   against the caller of these emit ops.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store the top-of-stack value (%rax) through the
   pointer saved at -16(%rbp) by the prologue, return 0.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit a binary add: pop the next-to-top operand into %rax
   (the top of the value stack is cached in %rax throughout).  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a binary subtract: next-to-top minus top.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply is not implemented; flag failure via emit_error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag failure via emit_error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit sign extension of %rax from ARG bits (8, 16 or 32).  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit logical not: %rax becomes 1 if it was zero, else 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit bitwise AND of the two top stack entries.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise OR of the two top stack entries.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise XOR of the two top stack entries.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise NOT of %rax.  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit equality comparison: %rax = (next-to-top == top) ? 1 : 0.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit signed less-than comparison of next-to-top against top.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit unsigned less-than comparison of next-to-top against top.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1854
/* Emit a memory dereference: replace %rax (an address) with the
   SIZE-byte value it points to (1, 2, 4 or 8 bytes).  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Emit a conditional branch: pop the stack; if the old top was
   non-zero, take the branch.  The jne is emitted with a zeroed rel32
   to be patched later via amd64_write_goto_address; report the byte
   offset of that field in *OFFSET_P and its size in *SIZE_P.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp with a zeroed rel32, to be patched
   later; report offset/size of the displacement field.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the displacement of a previously emitted jump at FROM so it
   targets TO.  Only 4-byte (rel32) displacements are supported.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

/* Emit code loading the 64-bit constant NUM into %rax (movabs).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN, using an indirect call through %r10 when FN is
   out of rel32 range.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1976
/* Emit code to fetch raw register REG: load the register number into
   %esi and call the get_raw_reg helper in the IPA.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit a pop of the value stack into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit a push of the cached top (%rax) onto the in-memory stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit zero extension of %rax from ARG bits (8, 16 or 32).  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit a swap of the top two stack entries.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code popping N entries off the in-memory value stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.
   Emit a call of FN with ARG1 as its single int argument (%edi).  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.
   Emit a call of FN with ARG1 in %edi and the stack top in %rsi; the
   stack top is preserved across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2104
df4a0200 2105static void
6b9801d4
SS
2106amd64_emit_eq_goto (int *offset_p, int *size_p)
2107{
2108 EMIT_ASM (amd64_eq,
2109 "cmp %rax,(%rsp)\n\t"
2110 "jne .Lamd64_eq_fallthru\n\t"
2111 "lea 0x8(%rsp),%rsp\n\t"
2112 "pop %rax\n\t"
2113 /* jmp, but don't trust the assembler to choose the right jump */
2114 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2115 ".Lamd64_eq_fallthru:\n\t"
2116 "lea 0x8(%rsp),%rsp\n\t"
2117 "pop %rax");
2118
2119 if (offset_p)
2120 *offset_p = 13;
2121 if (size_p)
2122 *size_p = 4;
2123}
2124
df4a0200 2125static void
6b9801d4
SS
2126amd64_emit_ne_goto (int *offset_p, int *size_p)
2127{
2128 EMIT_ASM (amd64_ne,
2129 "cmp %rax,(%rsp)\n\t"
2130 "je .Lamd64_ne_fallthru\n\t"
2131 "lea 0x8(%rsp),%rsp\n\t"
2132 "pop %rax\n\t"
2133 /* jmp, but don't trust the assembler to choose the right jump */
2134 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2135 ".Lamd64_ne_fallthru:\n\t"
2136 "lea 0x8(%rsp),%rsp\n\t"
2137 "pop %rax");
2138
2139 if (offset_p)
2140 *offset_p = 13;
2141 if (size_p)
2142 *size_p = 4;
2143}
2144
df4a0200 2145static void
6b9801d4
SS
2146amd64_emit_lt_goto (int *offset_p, int *size_p)
2147{
2148 EMIT_ASM (amd64_lt,
2149 "cmp %rax,(%rsp)\n\t"
2150 "jnl .Lamd64_lt_fallthru\n\t"
2151 "lea 0x8(%rsp),%rsp\n\t"
2152 "pop %rax\n\t"
2153 /* jmp, but don't trust the assembler to choose the right jump */
2154 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2155 ".Lamd64_lt_fallthru:\n\t"
2156 "lea 0x8(%rsp),%rsp\n\t"
2157 "pop %rax");
2158
2159 if (offset_p)
2160 *offset_p = 13;
2161 if (size_p)
2162 *size_p = 4;
2163}
2164
/* Emit a signed "less than or equal, then branch" for 64-bit
   operands; see amd64_emit_eq_goto for the stack protocol and the
   *OFFSET_P / *SIZE_P semantics.  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2184
/* Emit a signed "greater than, then branch" for 64-bit operands; see
   amd64_emit_eq_goto for the stack protocol and the *OFFSET_P /
   *SIZE_P semantics.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2204
df4a0200 2205static void
6b9801d4
SS
2206amd64_emit_ge_goto (int *offset_p, int *size_p)
2207{
2208 EMIT_ASM (amd64_ge,
2209 "cmp %rax,(%rsp)\n\t"
2210 "jnge .Lamd64_ge_fallthru\n\t"
2211 ".Lamd64_ge_jump:\n\t"
2212 "lea 0x8(%rsp),%rsp\n\t"
2213 "pop %rax\n\t"
2214 /* jmp, but don't trust the assembler to choose the right jump */
2215 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2216 ".Lamd64_ge_fallthru:\n\t"
2217 "lea 0x8(%rsp),%rsp\n\t"
2218 "pop %rax");
2219
2220 if (offset_p)
2221 *offset_p = 13;
2222 if (size_p)
2223 *size_p = 4;
2224}
2225
/* Table of agent-expression bytecode-compilation operations for
   64-bit inferiors.  The initializers are positional, so their order
   must match the member order of struct emit_ops (declared in the
   tracepoint header included above) — do not reorder.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2266
2267#endif /* __x86_64__ */
2268
/* Emit the agent-expression prologue for 32-bit inferiors: set up a
   standard %ebp frame and preserve %ebx, which the compiled code uses
   for the high half of the 64-bit top-of-stack value (%ebx:%eax).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2279
/* Emit the epilogue: store the 64-bit result (%ebx:%eax) through the
   value pointer saved at 12(%ebp), set the return value to 0
   (success), restore %ebx, and unwind the frame built by the
   prologue.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2292
/* Emit a 64-bit add: add the next-on-stack pair ((%esp)/4(%esp)) into
   %ebx:%eax with carry propagation, then drop the memory operand.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2301
/* Emit a 64-bit subtract: compute (next-on-stack - top) in place on
   the stack with borrow propagation, then pop the result into
   %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2311
/* 64-bit multiply is not implemented for 32-bit inferiors; flag an
   error so bytecode compilation is abandoned.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2317
/* 64-bit left shift is not implemented for 32-bit inferiors; flag an
   error so bytecode compilation is abandoned.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2323
/* 64-bit arithmetic right shift is not implemented for 32-bit
   inferiors; flag an error so bytecode compilation is abandoned.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2329
/* 64-bit logical right shift is not implemented for 32-bit inferiors;
   flag an error so bytecode compilation is abandoned.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2335
2336static void
2337i386_emit_ext (int arg)
2338{
2339 switch (arg)
2340 {
2341 case 8:
2342 EMIT_ASM32 (i386_ext_8,
2343 "cbtw\n\t"
2344 "cwtl\n\t"
2345 "movl %eax,%ebx\n\t"
2346 "sarl $31,%ebx");
2347 break;
2348 case 16:
2349 EMIT_ASM32 (i386_ext_16,
2350 "cwtl\n\t"
2351 "movl %eax,%ebx\n\t"
2352 "sarl $31,%ebx");
2353 break;
2354 case 32:
2355 EMIT_ASM32 (i386_ext_32,
2356 "movl %eax,%ebx\n\t"
2357 "sarl $31,%ebx");
2358 break;
2359 default:
2360 emit_error = 1;
2361 }
2362}
2363
/* Emit a logical NOT of the 64-bit top-of-stack: %ebx:%eax becomes 1
   if it was zero, 0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2374
/* Emit a 64-bit bitwise AND of the top two stack entries; the memory
   pair is combined into %ebx:%eax and then dropped.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2383
/* Emit a 64-bit bitwise OR of the top two stack entries; the memory
   pair is combined into %ebx:%eax and then dropped.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2392
/* Emit a 64-bit bitwise XOR of the top two stack entries; the memory
   pair is combined into %ebx:%eax and then dropped.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2401
/* Emit a 64-bit bitwise NOT of the top-of-stack by XORing both halves
   of %ebx:%eax with all-ones.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2409
/* Emit a 64-bit equality comparison: compare %ebx:%eax with the
   next-on-stack pair, drop the memory operand, and leave 1 in
   %ebx:%eax if equal, 0 otherwise.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2427
/* Emit a 64-bit signed less-than: a two-word comparison of the
   next-on-stack pair against %ebx:%eax (high halves first, low halves
   decide ties), leaving 1 in %ebx:%eax if next < top, else 0.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2446
/* Emit a 64-bit unsigned less-than: like i386_emit_less_signed but
   using the unsigned below condition for both word comparisons.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2465
2466static void
2467i386_emit_ref (int size)
2468{
2469 switch (size)
2470 {
2471 case 1:
2472 EMIT_ASM32 (i386_ref1,
2473 "movb (%eax),%al");
2474 break;
2475 case 2:
2476 EMIT_ASM32 (i386_ref2,
2477 "movw (%eax),%ax");
2478 break;
2479 case 4:
2480 EMIT_ASM32 (i386_ref4,
2481 "movl (%eax),%eax");
2482 break;
2483 case 8:
2484 EMIT_ASM32 (i386_ref8,
2485 "movl 4(%eax),%ebx\n\t"
2486 "movl (%eax),%eax");
2487 break;
2488 }
2489}
2490
/* Emit "branch if top-of-stack is non-zero": test %ebx:%eax, pop the
   next entry into %ebx:%eax, and emit a raw jne rel32 (0f 85) whose
   displacement is patched later.  *OFFSET_P is the byte offset of the
   displacement (11, per the sequence below) and *SIZE_P its size.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2508
/* Emit an unconditional jump as a raw jmp rel32 (0xe9) with a zero
   displacement to be patched later; the displacement starts at byte
   offset 1 and is 4 bytes wide.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2520
2521static void
2522i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2523{
2524 int diff = (to - (from + size));
2525 unsigned char buf[sizeof (int)];
2526
2527 /* We're only doing 4-byte sizes at the moment. */
2528 if (size != 4)
2529 {
2530 emit_error = 1;
2531 return;
2532 }
2533
2534 memcpy (buf, &diff, sizeof (int));
4196ab2a 2535 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2536}
2537
2538static void
4e29fb54 2539i386_emit_const (LONGEST num)
6a271cae
PA
2540{
2541 unsigned char buf[16];
b00ad6ff 2542 int i, hi, lo;
6a271cae
PA
2543 CORE_ADDR buildaddr = current_insn_ptr;
2544
2545 i = 0;
2546 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2547 lo = num & 0xffffffff;
2548 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2549 i += 4;
2550 hi = ((num >> 32) & 0xffffffff);
2551 if (hi)
2552 {
2553 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2554 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2555 i += 4;
2556 }
2557 else
2558 {
2559 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2560 }
2561 append_insns (&buildaddr, i, buf);
2562 current_insn_ptr = buildaddr;
2563}
2564
2565static void
2566i386_emit_call (CORE_ADDR fn)
2567{
2568 unsigned char buf[16];
2569 int i, offset;
2570 CORE_ADDR buildaddr;
2571
2572 buildaddr = current_insn_ptr;
2573 i = 0;
2574 buf[i++] = 0xe8; /* call <reladdr> */
2575 offset = ((int) fn) - (buildaddr + 5);
2576 memcpy (buf + 1, &offset, 4);
2577 append_insns (&buildaddr, 5, buf);
2578 current_insn_ptr = buildaddr;
2579}
2580
/* Emit code to fetch raw register REG of the inferior: push the
   register number and the raw-regs base address (saved at 8(%ebp) by
   the prologue) as arguments, call the get_raw_reg helper, and leave
   the (zero-extended) result in %ebx:%eax.  The register number is
   loaded via a hand-assembled "mov $<reg>,%eax" because it is a
   compile-time constant of this emitter, not of the asm template.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2606
/* Emit a pop of the 64-bit stack top into %ebx:%eax (low word first,
   matching the push order of i386_emit_stack_flush).  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2614
/* Emit code to push the current 64-bit top value (%ebx:%eax) onto the
   stack, high word first so the low word ends up at (%esp).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2622
/* Emit a zero extension of the top-of-stack value from ARG bits (8,
   16 or 32) to 64 bits in %ebx:%eax.  Any other width flags an emit
   error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2646
/* Emit code to swap the 64-bit top-of-stack (%ebx:%eax) with the
   next-on-stack pair, using %ecx/%edx as scratch.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2658
2659static void
2660i386_emit_stack_adjust (int n)
2661{
2662 unsigned char buf[16];
2663 int i;
2664 CORE_ADDR buildaddr = current_insn_ptr;
2665
2666 i = 0;
2667 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2668 buf[i++] = 0x64;
2669 buf[i++] = 0x24;
2670 buf[i++] = n * 8;
2671 append_insns (&buildaddr, i, buf);
2672 current_insn_ptr = buildaddr;
2673}
2674
/* Emit a call to FN with the single integer argument ARG1, leaving
   FN's 64-bit result in %ebx:%eax.  FN's prototype is
   `LONGEST(*fn)(int)'.  ARG1 is stored with a hand-assembled
   "movl $<arg1>,(%esp)" since it is a constant of this emitter.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  /* The 64-bit return comes back in %edx:%eax; move the high half
     into %ebx and drop the argument space.  */
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2702
/* Emit a call to FN passing ARG1 and the current 64-bit top-of-stack
   value as arguments, preserving the top across the call.  FN's
   prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2738

/* Emit an "equal, then branch" for 32-bit inferiors: compare the
   64-bit top (%ebx:%eax) with the next-on-stack pair, pop both, and
   take the branch (raw jmp rel32, patched later) when equal.
   *OFFSET_P is the byte offset of the 4-byte displacement within the
   emitted sequence; *SIZE_P its size.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2764
/* Emit a "not equal, then branch" for 32-bit inferiors; see
   i386_emit_eq_goto for the stack protocol and *OFFSET_P / *SIZE_P
   semantics.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2790
/* Emit a signed 64-bit "less than, then branch" for 32-bit inferiors:
   two-word comparison of next-on-stack against %ebx:%eax (high halves
   first, low halves decide ties).  See i386_emit_eq_goto for the
   *OFFSET_P / *SIZE_P semantics.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2816
/* Emit a signed 64-bit "less than or equal, then branch" for 32-bit
   inferiors; see i386_emit_lt_goto for the comparison scheme and
   i386_emit_eq_goto for the *OFFSET_P / *SIZE_P semantics.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2842
/* Emit a signed 64-bit "greater than, then branch" for 32-bit
   inferiors; see i386_emit_lt_goto for the comparison scheme and
   i386_emit_eq_goto for the *OFFSET_P / *SIZE_P semantics.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2868
/* Emit a signed 64-bit "greater than or equal, then branch" for
   32-bit inferiors; see i386_emit_lt_goto for the comparison scheme
   and i386_emit_eq_goto for the *OFFSET_P / *SIZE_P semantics.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2894
/* Table of agent-expression bytecode-compilation operations for
   32-bit inferiors.  The initializers are positional, so their order
   must match the member order of struct emit_ops — do not reorder.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2935
2936
2937static struct emit_ops *
2938x86_emit_ops (void)
2939{
2940#ifdef __x86_64__
3aee8918 2941 if (is_64bit_tdesc ())
6a271cae
PA
2942 return &amd64_emit_ops;
2943 else
2944#endif
2945 return &i386_emit_ops;
2946}
2947
/* Implementation of target ops method "sw_breakpoint_from_kind". */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  /* All x86 breakpoint kinds use the same int3 sequence, so KIND is
     ignored; *SIZE receives its length.  */
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2956
/* Implementation of linux_target_ops method "supports_range_stepping":
   range stepping is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2962
/* Implementation of linux_target_ops method "supports_hardware_single_step":
   hardware single-step is always available on x86.  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2971
/* Return the in-process agent's target description index matching the
   current thread's register description, so the IPA uses the same
   register layout as gdbserver.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  /* On 64-bit builds this returns unconditionally; the i386 handling
     below is only compiled into/reached by 32-bit builds.  */
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  /* The no-XML fallback tdesc has no entry in the i386 mapping; it
     corresponds to the plain SSE layout.  */
  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2987
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.
   Positional initializers: the order must match the member order of
   struct linux_target_ops.  */

struct linux_target_ops the_low_target =
{
  x86_emit_ops,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2999
/* The linux target ops object.  Points at the file's x86 target
   instance so generic gdbserver code can reach it.  */

linux_process_target *the_linux_target = &the_x86_target;
3003
/* One-time architecture setup: build the fallback ("no XML") target
   descriptions used when the peer cannot fetch XML tdescs, and
   register this file's regset table.  The allocated descriptions live
   for the life of the process and are intentionally never freed.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.54938 seconds and 4 git commands to generate.