/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

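/* Illustrative note (editor's sketch, not from the original file):
   these are the same operation codes that arch_prctl(2) accepts.
   With PTRACE_ARCH_PRCTL the data address and the operation code are
   passed as the third and fourth ptrace arguments respectively,
   mirroring the calls made later in this file:

     unsigned long base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   after which BASE holds that thread's FS segment base.  */
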
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,                   /* MPX registers BND0 ... BND3.  */
  -1, -1,                           /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,   /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,   /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,   /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,   /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                                /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

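/* Illustrative note (editor's sketch): each non-negative entry above
   is a byte offset into the `struct user_regs_struct' buffer used by
   PTRACE_GETREGS, so GDB register N travels between the regcache and
   BUF + x86_64_regmap[N]; the -1 entries mark registers (xmm, ymm,
   k, zmm, ...) that are not transferred through this path.  */
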
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

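/* Illustrative note (editor's sketch): register 0 of the amd64 Linux
   target description is "rax" (8 bytes) while register 0 of the i386
   description is "eax" (4 bytes), so the size of register 0 alone is
   enough to tell the two descriptions apart.  */
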
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

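/* Illustrative sketch (editor's addition, using only the calls that
   already appear above; SELECTOR is a hypothetical variable): on
   32-bit Linux the GS selector holds a GDT entry number in its upper
   bits, so shifting right by 3 (dropping the TI and RPL bits) yields
   the index PTRACE_GET_THREAD_AREA expects, and desc[1] of the
   returned descriptor is that entry's base address:

     unsigned int desc[4];  /* entry_number, base_addr, limit, flags */
     int idx = selector >> 3;
     ptrace (PTRACE_GET_THREAD_AREA, lwpid, (void *) (long) idx,
	     (unsigned long) &desc);
     CORE_ADDR tls_base = desc[1];  */
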
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}


static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..471] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
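
/* Illustrative sketch (editor's addition): given the layout above,
   XCR0 lands at 64-bit word 464 / 8 == 58 of the XSAVE buffer, and
   individual feature bits can then be tested against the
   X86_XSTATE_* masks from gdbsupport/x86-xstate.h:

     uint64_t xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET
				/ sizeof (uint64_t)];
     bool have_avx = (xcr0 & X86_XSTATE_AVX) != 0;  */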

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

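/* Illustrative example (editor's note; the packet text is an
   assumption based on the comment below, not taken from this file):
   a GDB that understands x86 XML target descriptions sends a
   qSupported packet containing something like

     xmlRegisters=i386,arm,mips

   and spotting "i386" in that comma-separated list is what flips
   USE_XML below.  */
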
void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the syscall number trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

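/* Editor's note on the registers used above: orig_rax/orig_eax is the
   kernel's saved copy of the syscall number; by the time the
   SYSCALL_SIGTRAP is reported, rax/eax itself already holds -ENOSYS
   (at syscall entry) or the return value (at syscall exit).  */
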
bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

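/* Usage sketch for push_opcode (editor's addition, mirroring the
   calls below): push_opcode (buf, "48 89 e6") decodes one hex byte
   per strtoul call, writes 0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF,
   and returns 3; parsing stops once strtoul consumes nothing.  */
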
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
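
/* Illustrative summary of the pad built above (editor's sketch;
   layout is schematic only):

     tracepoint site:  jmp <pad>          ; patched in by our caller
     pad:              save registers, push $pc
		       spin on collecting_t lock
		       call gdb_collect
		       clear lock, restore registers
		       <relocated original instruction(s)>
		       jmp <tracepoint site + orig_size>  */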

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to the user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
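
/* How this expands (editor's sketch): EMIT_ASM (foo, "pop %rax")
   assembles "pop %rax" between local labels start_foo/end_foo inside
   the enclosing function's own text, jumps over those bytes at run
   time so they are never executed in gdbserver itself, and lets
   add_insns copy them into the compiled bytecode buffer at
   current_insn_ptr.  */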

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1698
1699#ifdef __x86_64__
1700
1701static void
1702amd64_emit_prologue (void)
1703{
1704 EMIT_ASM (amd64_prologue,
1705 "pushq %rbp\n\t"
1706 "movq %rsp,%rbp\n\t"
1707 "sub $0x20,%rsp\n\t"
1708 "movq %rdi,-8(%rbp)\n\t"
1709 "movq %rsi,-16(%rbp)");
1710}
1711
1712
1713static void
1714amd64_emit_epilogue (void)
1715{
1716 EMIT_ASM (amd64_epilogue,
1717 "movq -16(%rbp),%rdi\n\t"
1718 "movq %rax,(%rdi)\n\t"
1719 "xor %rax,%rax\n\t"
1720 "leave\n\t"
1721 "ret");
1722}
1723
1724static void
1725amd64_emit_add (void)
1726{
1727 EMIT_ASM (amd64_add,
1728 "add (%rsp),%rax\n\t"
1729 "lea 0x8(%rsp),%rsp");
1730}
1731
1732static void
1733amd64_emit_sub (void)
1734{
1735 EMIT_ASM (amd64_sub,
1736 "sub %rax,(%rsp)\n\t"
1737 "pop %rax");
1738}
1739
1740static void
1741amd64_emit_mul (void)
1742{
1743 emit_error = 1;
1744}
1745
1746static void
1747amd64_emit_lsh (void)
1748{
1749 emit_error = 1;
1750}
1751
1752static void
1753amd64_emit_rsh_signed (void)
1754{
1755 emit_error = 1;
1756}
1757
1758static void
1759amd64_emit_rsh_unsigned (void)
1760{
1761 emit_error = 1;
1762}
1763
1764static void
1765amd64_emit_ext (int arg)
1766{
1767 switch (arg)
1768 {
1769 case 8:
1770 EMIT_ASM (amd64_ext_8,
1771 "cbtw\n\t"
1772 "cwtl\n\t"
1773 "cltq");
1774 break;
1775 case 16:
1776 EMIT_ASM (amd64_ext_16,
1777 "cwtl\n\t"
1778 "cltq");
1779 break;
1780 case 32:
1781 EMIT_ASM (amd64_ext_32,
1782 "cltq");
1783 break;
1784 default:
1785 emit_error = 1;
1786 }
1787}
1788
1789static void
1790amd64_emit_log_not (void)
1791{
1792 EMIT_ASM (amd64_log_not,
1793 "test %rax,%rax\n\t"
1794 "sete %cl\n\t"
1795 "movzbq %cl,%rax");
1796}
1797
1798static void
1799amd64_emit_bit_and (void)
1800{
1801 EMIT_ASM (amd64_and,
1802 "and (%rsp),%rax\n\t"
1803 "lea 0x8(%rsp),%rsp");
1804}
1805
1806static void
1807amd64_emit_bit_or (void)
1808{
1809 EMIT_ASM (amd64_or,
1810 "or (%rsp),%rax\n\t"
1811 "lea 0x8(%rsp),%rsp");
1812}
1813
1814static void
1815amd64_emit_bit_xor (void)
1816{
1817 EMIT_ASM (amd64_xor,
1818 "xor (%rsp),%rax\n\t"
1819 "lea 0x8(%rsp),%rsp");
1820}
1821
1822static void
1823amd64_emit_bit_not (void)
1824{
1825 EMIT_ASM (amd64_bit_not,
1826 "xorq $0xffffffffffffffff,%rax");
1827}
1828
1829static void
1830amd64_emit_equal (void)
1831{
1832 EMIT_ASM (amd64_equal,
1833 "cmp %rax,(%rsp)\n\t"
1834 "je .Lamd64_equal_true\n\t"
1835 "xor %rax,%rax\n\t"
1836 "jmp .Lamd64_equal_end\n\t"
1837 ".Lamd64_equal_true:\n\t"
1838 "mov $0x1,%rax\n\t"
1839 ".Lamd64_equal_end:\n\t"
1840 "lea 0x8(%rsp),%rsp");
1841}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
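
/* For the goto emitters, *OFFSET_P is the offset of the branch's
   32-bit displacement within the emitted sequence and *SIZE_P its
   width; the displacement is left zeroed here and patched in later by
   amd64_write_goto_address once the target is known.  The branch
   opcodes are spelled with .byte so the assembler cannot shorten them
   to 8-bit jumps.  */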

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function may live in a shared library, more than
     31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call.  Load the 64-bit
	 address into %rdx and call through it; %rdx is
	 call-clobbered and carries no argument here, so we don't
	 have to push/pop it.  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
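
/* Illustrative numbers (not from the source): with BUILDADDR at
   0x1000 and FN at 0x2000, offset64 = 0x2000 - (0x1000 + 5) = 0xffb,
   which fits in 32 bits, so the short 5-byte "e8" form is used; only
   when the displacement leaves the signed 32-bit range does the
   movabs + indirect-call sequence get emitted.  */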

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles small adjustments: N * 8 must fit in the signed
     8-bit displacement emitted here, and we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
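
/* The 13 above counts the bytes in front of the rel32: 4 for the cmp,
   2 for the short jne, 5 for the lea, 1 for the pop and 1 for the
   0xe9 opcode (assuming the assembler picks the 2-byte jne, which it
   can, since the label is in range).  */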

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
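
/* On i386 the value stack holds 64-bit entries as register pairs: the
   low half of the top of stack is cached in %eax and the high half in
   %ebx, which is why the 64-bit add above is an add/adc pair and why
   the prologue preserved %ebx.  */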

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
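
/* Illustrative only: for an agent expression along the lines of
   "reg(0) + 42", the generic bytecode compiler that consumes this
   table would roughly drive the hooks as

     emit_prologue ();
     emit_reg (0);          (top of stack = raw register 0)
     emit_stack_flush ();   (push it)
     emit_const (42);       (top of stack = 42)
     emit_add ();           (add the pushed value back in)
     emit_epilogue ();

   The exact call sequence is decided by that compiler, not by this
   file.  */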

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
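
/* Both the i386 and amd64 cases share the same software breakpoint:
   x86_breakpoint (defined earlier in this file) is the one-byte int3
   trap instruction, so KIND does not matter here.  */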

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}