gdb: fix vfork with multiple threads
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3666a048 3 Copyright (C) 2002-2021 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918 50#ifdef __x86_64__
51a948fd 51static target_desc_up tdesc_amd64_linux_no_xml;
3aee8918 52#endif
51a948fd 53static target_desc_up tdesc_i386_linux_no_xml;
3aee8918 54
1570b33e 55
fa593d66 56static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 57static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
/* Backward compatibility for gdb without XML support.  These literal
   target descriptions are handed out verbatim when USE_XML is off.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
72
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

/* Older kernel headers may lack this request number.  */
#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
94
ef0478f6
TBA
95/* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99class x86_target : public linux_process_target
100{
101public:
102
aa8d21c9
TBA
103 const regs_info *get_regs_info () override;
104
3ca4edb6
TBA
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
007c9b97
TBA
107 bool supports_z_point_type (char z_type) override;
108
b315b67d 109 void process_qsupported (gdb::array_view<const char * const> features) override;
a5b5da92 110
47f70aa7
TBA
111 bool supports_tracepoints () override;
112
809a0c35
TBA
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
ab64c999
TBA
125 struct emit_ops *emit_ops () override;
126
fc5ecdb6
TBA
127 int get_ipa_tdesc_idx () override;
128
797bcff5
TBA
129protected:
130
131 void low_arch_setup () override;
daca57a7
TBA
132
133 bool low_cannot_fetch_register (int regno) override;
134
135 bool low_cannot_store_register (int regno) override;
bf9ae9d8
TBA
136
137 bool low_supports_breakpoints () override;
138
139 CORE_ADDR low_get_pc (regcache *regcache) override;
140
141 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
d4807ea2
TBA
142
143 int low_decr_pc_after_break () override;
d7146cda
TBA
144
145 bool low_breakpoint_at (CORE_ADDR pc) override;
9db9aa23
TBA
146
147 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
148 int size, raw_breakpoint *bp) override;
149
150 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
151 int size, raw_breakpoint *bp) override;
ac1bbaca
TBA
152
153 bool low_stopped_by_watchpoint () override;
154
155 CORE_ADDR low_stopped_data_address () override;
b35db733
TBA
156
157 /* collect_ptrace_register/supply_ptrace_register are not needed in the
158 native i386 case (no registers smaller than an xfer unit), and are not
159 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
cb63de7c
TBA
160
161 /* Need to fix up i386 siginfo if host is amd64. */
162 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
163 int direction) override;
fd000fb3
TBA
164
165 arch_process_info *low_new_process () override;
166
167 void low_delete_process (arch_process_info *info) override;
168
169 void low_new_thread (lwp_info *) override;
170
171 void low_delete_thread (arch_lwp_info *) override;
172
173 void low_new_fork (process_info *parent, process_info *child) override;
d7599cc0
TBA
174
175 void low_prepare_to_resume (lwp_info *lwp) override;
a5b5da92 176
13e567af
TBA
177 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
178
9cfd8715
TBA
179 bool low_supports_range_stepping () override;
180
9eedd27d
TBA
181 bool low_supports_catch_syscall () override;
182
183 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
184
a5b5da92
TBA
185private:
186
187 /* Update all the target description of all processes; a new GDB
188 connected, and it may or not support xml target descriptions. */
189 void update_xmltarget ();
ef0478f6
TBA
190};
191
192/* The singleton target ops object. */
193
194static x86_target the_x86_target;
195
aa5ca48f
DE
196/* Per-process arch-specific data we want to keep. */
197
198struct arch_process_info
199{
df7e5265 200 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
201};
202
d0722149
DE
203#ifdef __x86_64__
204
205/* Mapping between the general-purpose registers in `struct user'
206 format and GDB's register array layout.
207 Note that the transfer layout uses 64-bit regs. */
208static /*const*/ int i386_regmap[] =
209{
210 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
211 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
212 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
213 DS * 8, ES * 8, FS * 8, GS * 8
214};
215
216#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217
218/* So code below doesn't have to care, i386 or amd64. */
219#define ORIG_EAX ORIG_RAX
bc9540e8 220#define REGSIZE 8
d0722149
DE
221
222static const int x86_64_regmap[] =
223{
224 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
225 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
226 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
227 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
228 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
229 DS * 8, ES * 8, FS * 8, GS * 8,
230 -1, -1, -1, -1, -1, -1, -1, -1,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
233 -1,
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 ORIG_RAX * 8,
2735833d 236 21 * 8, 22 * 8,
a196ebeb 237 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
238 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
239 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
240 -1, -1, -1, -1, -1, -1, -1, -1,
241 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1,
243 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
245 -1, -1, -1, -1, -1, -1, -1, -1,
246 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1 /* pkru */
d0722149
DE
249};
250
251#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 252#define X86_64_USER_REGS (GS + 1)
d0722149
DE
253
254#else /* ! __x86_64__ */
255
256/* Mapping between the general-purpose registers in `struct user'
257 format and GDB's register array layout. */
258static /*const*/ int i386_regmap[] =
259{
260 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
261 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
262 EIP * 4, EFL * 4, CS * 4, SS * 4,
263 DS * 4, ES * 4, FS * 4, GS * 4
264};
265
266#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
267
bc9540e8
PA
268#define REGSIZE 4
269
d0722149 270#endif
3aee8918
PA
271
272#ifdef __x86_64__
273
274/* Returns true if the current inferior belongs to a x86-64 process,
275 per the tdesc. */
276
277static int
278is_64bit_tdesc (void)
279{
0bfdf32f 280 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
281
282 return register_size (regcache->tdesc, 0) == 8;
283}
284
285#endif
286
d0722149
DE
287\f
288/* Called by libthread_db. */
289
290ps_err_e
754653a7 291ps_get_thread_area (struct ps_prochandle *ph,
d0722149
DE
292 lwpid_t lwpid, int idx, void **base)
293{
294#ifdef __x86_64__
3aee8918 295 int use_64bit = is_64bit_tdesc ();
d0722149
DE
296
297 if (use_64bit)
298 {
299 switch (idx)
300 {
301 case FS:
302 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
303 return PS_OK;
304 break;
305 case GS:
306 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
307 return PS_OK;
308 break;
309 default:
310 return PS_BADADDR;
311 }
312 return PS_ERR;
313 }
314#endif
315
316 {
317 unsigned int desc[4];
318
319 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
320 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
321 return PS_ERR;
322
d1ec4ce7
DE
323 /* Ensure we properly extend the value to 64-bits for x86_64. */
324 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
325 return PS_OK;
326 }
327}
fa593d66
PA
328
329/* Get the thread area address. This is used to recognize which
330 thread is which when tracing with the in-process agent library. We
331 don't read anything from the address, and treat it as opaque; it's
332 the address itself that we assume is unique per-thread. */
333
13e567af
TBA
334int
335x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
fa593d66
PA
336{
337#ifdef __x86_64__
3aee8918 338 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
339
340 if (use_64bit)
341 {
342 void *base;
343 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
344 {
345 *addr = (CORE_ADDR) (uintptr_t) base;
346 return 0;
347 }
348
349 return -1;
350 }
351#endif
352
353 {
f2907e49 354 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
d86d4aaf
DE
355 struct thread_info *thr = get_lwp_thread (lwp);
356 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
357 unsigned int desc[4];
358 ULONGEST gs = 0;
359 const int reg_thread_area = 3; /* bits to scale down register value. */
360 int idx;
361
362 collect_register_by_name (regcache, "gs", &gs);
363
364 idx = gs >> reg_thread_area;
365
366 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 367 lwpid_of (thr),
493e2a69 368 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
369 return -1;
370
371 *addr = desc[1];
372 return 0;
373 }
374}
375
376
d0722149 377\f
daca57a7
TBA
378bool
379x86_target::low_cannot_store_register (int regno)
d0722149 380{
3aee8918
PA
381#ifdef __x86_64__
382 if (is_64bit_tdesc ())
daca57a7 383 return false;
3aee8918
PA
384#endif
385
d0722149
DE
386 return regno >= I386_NUM_REGS;
387}
388
daca57a7
TBA
389bool
390x86_target::low_cannot_fetch_register (int regno)
d0722149 391{
3aee8918
PA
392#ifdef __x86_64__
393 if (is_64bit_tdesc ())
daca57a7 394 return false;
3aee8918
PA
395#endif
396
d0722149
DE
397 return regno >= I386_NUM_REGS;
398}
399
037e8112
TV
400static void
401collect_register_i386 (struct regcache *regcache, int regno, void *buf)
402{
403 collect_register (regcache, regno, buf);
404
405#ifdef __x86_64__
406 /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
407 space reserved in buf for the register is 8 bytes. Make sure the entire
408 reserved space is initialized. */
409
410 gdb_assert (register_size (regcache->tdesc, regno) == 4);
411
412 if (regno == RAX)
413 {
414 /* Sign extend EAX value to avoid potential syscall restart
415 problems.
416
417 See amd64_linux_collect_native_gregset() in
418 gdb/amd64-linux-nat.c for a detailed explanation. */
419 *(int64_t *) buf = *(int32_t *) buf;
420 }
421 else
422 {
423 /* Zero-extend. */
424 *(uint64_t *) buf = *(uint32_t *) buf;
425 }
426#endif
427}
428
d0722149 429static void
442ea881 430x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
431{
432 int i;
433
434#ifdef __x86_64__
3aee8918 435 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
436 {
437 for (i = 0; i < X86_64_NUM_REGS; i++)
438 if (x86_64_regmap[i] != -1)
442ea881 439 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
2735833d 440
d0722149
DE
441 return;
442 }
443#endif
444
445 for (i = 0; i < I386_NUM_REGS; i++)
037e8112 446 collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
3f52fdbc 447
037e8112
TV
448 /* Handle ORIG_EAX, which is not in i386_regmap. */
449 collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
450 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
451}
452
453static void
442ea881 454x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
455{
456 int i;
457
458#ifdef __x86_64__
3aee8918 459 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
460 {
461 for (i = 0; i < X86_64_NUM_REGS; i++)
462 if (x86_64_regmap[i] != -1)
442ea881 463 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
2735833d 464
d0722149
DE
465 return;
466 }
467#endif
468
469 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 470 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 471
442ea881 472 supply_register_by_name (regcache, "orig_eax",
bc9540e8 473 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
474}
475
476static void
442ea881 477x86_fill_fpregset (struct regcache *regcache, void *buf)
d0722149
DE
478{
479#ifdef __x86_64__
442ea881 480 i387_cache_to_fxsave (regcache, buf);
d0722149 481#else
442ea881 482 i387_cache_to_fsave (regcache, buf);
d0722149
DE
483#endif
484}
485
486static void
442ea881 487x86_store_fpregset (struct regcache *regcache, const void *buf)
d0722149
DE
488{
489#ifdef __x86_64__
442ea881 490 i387_fxsave_to_cache (regcache, buf);
d0722149 491#else
442ea881 492 i387_fsave_to_cache (regcache, buf);
d0722149
DE
493#endif
494}
495
#ifndef __x86_64__

/* i386-only: fill/supply the FPX (fxsave) regset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
511
1570b33e
L
512static void
513x86_fill_xstateregset (struct regcache *regcache, void *buf)
514{
515 i387_cache_to_xsave (regcache, buf);
516}
517
518static void
519x86_store_xstateregset (struct regcache *regcache, const void *buf)
520{
521 i387_xsave_to_cache (regcache, buf);
522}
523
d0722149
DE
524/* ??? The non-biarch i386 case stores all the i387 regs twice.
525 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
526 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
527 doesn't work. IWBN to avoid the duplication in the case where it
528 does work. Maybe the arch_setup routine could check whether it works
3aee8918 529 and update the supported regsets accordingly. */
d0722149 530
3aee8918 531static struct regset_info x86_regsets[] =
d0722149
DE
532{
533#ifdef HAVE_PTRACE_GETREGS
1570b33e 534 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
535 GENERAL_REGS,
536 x86_fill_gregset, x86_store_gregset },
1570b33e
L
537 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
538 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
539# ifndef __x86_64__
540# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 541 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
542 EXTENDED_REGS,
543 x86_fill_fpxregset, x86_store_fpxregset },
544# endif
545# endif
1570b33e 546 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
547 FP_REGS,
548 x86_fill_fpregset, x86_store_fpregset },
549#endif /* HAVE_PTRACE_GETREGS */
50bc912a 550 NULL_REGSET
d0722149
DE
551};
552
bf9ae9d8
TBA
553bool
554x86_target::low_supports_breakpoints ()
555{
556 return true;
557}
558
559CORE_ADDR
560x86_target::low_get_pc (regcache *regcache)
d0722149 561{
3aee8918 562 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
563
564 if (use_64bit)
565 {
6598661d
PA
566 uint64_t pc;
567
442ea881 568 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
569 return (CORE_ADDR) pc;
570 }
571 else
572 {
6598661d
PA
573 uint32_t pc;
574
442ea881 575 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
576 return (CORE_ADDR) pc;
577 }
578}
579
bf9ae9d8
TBA
580void
581x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 582{
3aee8918 583 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
584
585 if (use_64bit)
586 {
6598661d
PA
587 uint64_t newpc = pc;
588
442ea881 589 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
590 }
591 else
592 {
6598661d
PA
593 uint32_t newpc = pc;
594
442ea881 595 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
596 }
597}
d4807ea2
TBA
598
599int
600x86_target::low_decr_pc_after_break ()
601{
602 return 1;
603}
604
d0722149 605\f
dd373349 606static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
607#define x86_breakpoint_len 1
608
d7146cda
TBA
609bool
610x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
611{
612 unsigned char c;
613
d7146cda 614 read_memory (pc, &c, 1);
d0722149 615 if (c == 0xCC)
d7146cda 616 return true;
d0722149 617
d7146cda 618 return false;
d0722149
DE
619}
620\f
42995dbd 621/* Low-level function vector. */
df7e5265 622struct x86_dr_low_type x86_dr_low =
42995dbd 623 {
d33472ad
GB
624 x86_linux_dr_set_control,
625 x86_linux_dr_set_addr,
626 x86_linux_dr_get_addr,
627 x86_linux_dr_get_status,
628 x86_linux_dr_get_control,
42995dbd
GB
629 sizeof (void *),
630 };
aa5ca48f 631\f
90d74c30 632/* Breakpoint/Watchpoint support. */
aa5ca48f 633
007c9b97
TBA
634bool
635x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
636{
637 switch (z_type)
638 {
639 case Z_PACKET_SW_BP:
640 case Z_PACKET_HW_BP:
641 case Z_PACKET_WRITE_WP:
642 case Z_PACKET_ACCESS_WP:
007c9b97 643 return true;
802e8e6d 644 default:
007c9b97 645 return false;
802e8e6d
PA
646 }
647}
648
9db9aa23
TBA
649int
650x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
651 int size, raw_breakpoint *bp)
aa5ca48f
DE
652{
653 struct process_info *proc = current_process ();
802e8e6d 654
aa5ca48f
DE
655 switch (type)
656 {
802e8e6d
PA
657 case raw_bkpt_type_hw:
658 case raw_bkpt_type_write_wp:
659 case raw_bkpt_type_access_wp:
a4165e94 660 {
802e8e6d
PA
661 enum target_hw_bp_type hw_type
662 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 663 struct x86_debug_reg_state *state
fe978cb0 664 = &proc->priv->arch_private->debug_reg_state;
a4165e94 665
df7e5265 666 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 667 }
961bd387 668
aa5ca48f
DE
669 default:
670 /* Unsupported. */
671 return 1;
672 }
673}
674
9db9aa23
TBA
675int
676x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
677 int size, raw_breakpoint *bp)
aa5ca48f
DE
678{
679 struct process_info *proc = current_process ();
802e8e6d 680
aa5ca48f
DE
681 switch (type)
682 {
802e8e6d
PA
683 case raw_bkpt_type_hw:
684 case raw_bkpt_type_write_wp:
685 case raw_bkpt_type_access_wp:
a4165e94 686 {
802e8e6d
PA
687 enum target_hw_bp_type hw_type
688 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 689 struct x86_debug_reg_state *state
fe978cb0 690 = &proc->priv->arch_private->debug_reg_state;
a4165e94 691
df7e5265 692 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 693 }
aa5ca48f
DE
694 default:
695 /* Unsupported. */
696 return 1;
697 }
698}
699
ac1bbaca
TBA
700bool
701x86_target::low_stopped_by_watchpoint ()
aa5ca48f
DE
702{
703 struct process_info *proc = current_process ();
fe978cb0 704 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
705}
706
ac1bbaca
TBA
707CORE_ADDR
708x86_target::low_stopped_data_address ()
aa5ca48f
DE
709{
710 struct process_info *proc = current_process ();
711 CORE_ADDR addr;
fe978cb0 712 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 713 &addr))
aa5ca48f
DE
714 return addr;
715 return 0;
716}
717\f
718/* Called when a new process is created. */
719
fd000fb3
TBA
720arch_process_info *
721x86_target::low_new_process ()
aa5ca48f 722{
ed859da7 723 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 724
df7e5265 725 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
726
727 return info;
728}
729
04ec7890
SM
730/* Called when a process is being deleted. */
731
fd000fb3
TBA
732void
733x86_target::low_delete_process (arch_process_info *info)
04ec7890
SM
734{
735 xfree (info);
736}
737
fd000fb3
TBA
738void
739x86_target::low_new_thread (lwp_info *lwp)
740{
741 /* This comes from nat/. */
742 x86_linux_new_thread (lwp);
743}
3a8a0396 744
fd000fb3
TBA
745void
746x86_target::low_delete_thread (arch_lwp_info *alwp)
747{
748 /* This comes from nat/. */
749 x86_linux_delete_thread (alwp);
750}
751
752/* Target routine for new_fork. */
753
754void
755x86_target::low_new_fork (process_info *parent, process_info *child)
3a8a0396
DB
756{
757 /* These are allocated by linux_add_process. */
758 gdb_assert (parent->priv != NULL
759 && parent->priv->arch_private != NULL);
760 gdb_assert (child->priv != NULL
761 && child->priv->arch_private != NULL);
762
763 /* Linux kernel before 2.6.33 commit
764 72f674d203cd230426437cdcf7dd6f681dad8b0d
765 will inherit hardware debug registers from parent
766 on fork/vfork/clone. Newer Linux kernels create such tasks with
767 zeroed debug registers.
768
769 GDB core assumes the child inherits the watchpoints/hw
770 breakpoints of the parent, and will remove them all from the
771 forked off process. Copy the debug registers mirrors into the
772 new process so that all breakpoints and watchpoints can be
773 removed together. The debug registers mirror will become zeroed
774 in the end before detaching the forked off process, thus making
775 this compatible with older Linux kernels too. */
776
777 *child->priv->arch_private = *parent->priv->arch_private;
778}
779
d7599cc0
TBA
780void
781x86_target::low_prepare_to_resume (lwp_info *lwp)
782{
783 /* This comes from nat/. */
784 x86_linux_prepare_to_resume (lwp);
785}
786
70a0bb6b
GB
787/* See nat/x86-dregs.h. */
788
789struct x86_debug_reg_state *
790x86_debug_reg_state (pid_t pid)
791{
792 struct process_info *proc = find_process_pid (pid);
793
794 return &proc->priv->arch_private->debug_reg_state;
795}
aa5ca48f 796\f
d0722149
DE
797/* When GDBSERVER is built as a 64-bit application on linux, the
798 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
799 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
800 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
801 conversion in-place ourselves. */
802
9cf12d57 803/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
804 layout of the inferiors' architecture. Returns true if any
805 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 806 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
807 INF. */
808
cb63de7c
TBA
809bool
810x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
d0722149
DE
811{
812#ifdef __x86_64__
760256f9 813 unsigned int machine;
0bfdf32f 814 int tid = lwpid_of (current_thread);
760256f9
PA
815 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
816
d0722149 817 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 818 if (!is_64bit_tdesc ())
9cf12d57 819 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 820 FIXUP_32);
c92b5177 821 /* No fixup for native x32 GDB. */
760256f9 822 else if (!is_elf64 && sizeof (void *) == 8)
9cf12d57 823 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 824 FIXUP_X32);
d0722149
DE
825#endif
826
cb63de7c 827 return false;
d0722149
DE
828}
829\f
1570b33e
L
/* Non-zero once the connected GDB announced (via qSupported
   "xmlRegisters=i386") that it understands XML target
   descriptions.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
1570b33e 863
3aee8918
PA
864/* Get Linux/x86 target description from running target. */
865
866static const struct target_desc *
867x86_linux_read_description (void)
1570b33e 868{
3aee8918
PA
869 unsigned int machine;
870 int is_elf64;
a196ebeb 871 int xcr0_features;
3aee8918
PA
872 int tid;
873 static uint64_t xcr0;
3a13a53b 874 struct regset_info *regset;
1570b33e 875
0bfdf32f 876 tid = lwpid_of (current_thread);
1570b33e 877
3aee8918 878 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 879
3aee8918 880 if (sizeof (void *) == 4)
3a13a53b 881 {
3aee8918
PA
882 if (is_elf64 > 0)
883 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
884#ifndef __x86_64__
885 else if (machine == EM_X86_64)
886 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
887#endif
888 }
3a13a53b 889
3aee8918
PA
890#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
891 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
892 {
893 elf_fpxregset_t fpxregs;
3a13a53b 894
3aee8918 895 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 896 {
3aee8918
PA
897 have_ptrace_getfpxregs = 0;
898 have_ptrace_getregset = 0;
f49ff000 899 return i386_linux_read_description (X86_XSTATE_X87);
3a13a53b 900 }
3aee8918
PA
901 else
902 have_ptrace_getfpxregs = 1;
3a13a53b 903 }
1570b33e
L
904#endif
905
906 if (!use_xml)
907 {
df7e5265 908 x86_xcr0 = X86_XSTATE_SSE_MASK;
3aee8918 909
1570b33e
L
910 /* Don't use XML. */
911#ifdef __x86_64__
3aee8918 912 if (machine == EM_X86_64)
51a948fd 913 return tdesc_amd64_linux_no_xml.get ();
1570b33e 914 else
1570b33e 915#endif
51a948fd 916 return tdesc_i386_linux_no_xml.get ();
1570b33e
L
917 }
918
1570b33e
L
919 if (have_ptrace_getregset == -1)
920 {
df7e5265 921 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 922 struct iovec iov;
1570b33e
L
923
924 iov.iov_base = xstateregs;
925 iov.iov_len = sizeof (xstateregs);
926
927 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
928 if (ptrace (PTRACE_GETREGSET, tid,
929 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
930 have_ptrace_getregset = 0;
931 else
1570b33e 932 {
3aee8918
PA
933 have_ptrace_getregset = 1;
934
935 /* Get XCR0 from XSAVE extended state. */
936 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
937 / sizeof (uint64_t))];
938
939 /* Use PTRACE_GETREGSET if it is available. */
940 for (regset = x86_regsets;
941 regset->fill_function != NULL; regset++)
942 if (regset->get_request == PTRACE_GETREGSET)
df7e5265 943 regset->size = X86_XSTATE_SIZE (xcr0);
3aee8918
PA
944 else if (regset->type != GENERAL_REGS)
945 regset->size = 0;
1570b33e 946 }
1570b33e
L
947 }
948
3aee8918 949 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb 950 xcr0_features = (have_ptrace_getregset
2e1e43e1 951 && (xcr0 & X86_XSTATE_ALL_MASK));
3aee8918 952
a196ebeb 953 if (xcr0_features)
3aee8918 954 x86_xcr0 = xcr0;
1570b33e 955
3aee8918
PA
956 if (machine == EM_X86_64)
957 {
1570b33e 958#ifdef __x86_64__
b4570e4b 959 const target_desc *tdesc = NULL;
a196ebeb 960
b4570e4b 961 if (xcr0_features)
3aee8918 962 {
b4570e4b
YQ
963 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
964 !is_elf64);
1570b33e 965 }
b4570e4b
YQ
966
967 if (tdesc == NULL)
968 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
969 return tdesc;
3aee8918 970#endif
1570b33e 971 }
3aee8918
PA
972 else
973 {
f49ff000 974 const target_desc *tdesc = NULL;
a1fa17ee 975
f49ff000
YQ
976 if (xcr0_features)
977 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
2b863f51 978
f49ff000
YQ
979 if (tdesc == NULL)
980 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
a196ebeb 981
f49ff000 982 return tdesc;
3aee8918
PA
983 }
984
985 gdb_assert_not_reached ("failed to return tdesc");
986}
987
3aee8918
PA
988/* Update all the target description of all processes; a new GDB
989 connected, and it may or not support xml target descriptions. */
990
797bcff5
TBA
991void
992x86_target::update_xmltarget ()
3aee8918 993{
0bfdf32f 994 struct thread_info *saved_thread = current_thread;
3aee8918
PA
995
996 /* Before changing the register cache's internal layout, flush the
997 contents of the current valid caches back to the threads, and
998 release the current regcache objects. */
999 regcache_release ();
1000
797bcff5 1001 for_each_process ([this] (process_info *proc) {
9179355e
SM
1002 int pid = proc->pid;
1003
1004 /* Look up any thread of this process. */
1005 current_thread = find_any_thread_of_pid (pid);
1006
797bcff5 1007 low_arch_setup ();
9179355e 1008 });
3aee8918 1009
0bfdf32f 1010 current_thread = saved_thread;
1570b33e
L
1011}
1012
1013/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1014 PTRACE_GETREGSET. */
1015
a5b5da92 1016void
b315b67d 1017x86_target::process_qsupported (gdb::array_view<const char * const> features)
1570b33e
L
1018{
1019 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1020 with "i386" in qSupported query, it supports x86 XML target
1021 descriptions. */
1022 use_xml = 0;
1570b33e 1023
b315b67d
SM
1024 for (const char *feature : features)
1025 {
06e03fff 1026 if (startswith (feature, "xmlRegisters="))
1570b33e 1027 {
06e03fff 1028 char *copy = xstrdup (feature + 13);
06e03fff 1029
ca3a04f6
CB
1030 char *saveptr;
1031 for (char *p = strtok_r (copy, ",", &saveptr);
1032 p != NULL;
1033 p = strtok_r (NULL, ",", &saveptr))
1570b33e 1034 {
06e03fff
PA
1035 if (strcmp (p, "i386") == 0)
1036 {
1037 use_xml = 1;
1038 break;
1039 }
1570b33e 1040 }
1570b33e 1041
06e03fff
PA
1042 free (copy);
1043 }
1570b33e 1044 }
b315b67d 1045
a5b5da92 1046 update_xmltarget ();
1570b33e
L
1047}
1048
/* Common for x86/x86-64.  */

/* Regset bookkeeping shared by 32-bit and 64-bit inferiors.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };
214d508e
L
1057
#ifdef __x86_64__
/* Register access info for 64-bit inferiors: regsets only, no
   PTRACE_PEEKUSER table.  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
3aee8918
PA
/* PTRACE_PEEKUSER register map for 32-bit inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

/* Register access info for 32-bit inferiors: usrregs plus the shared
   regsets.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1078
aa8d21c9
TBA
/* Return the regs_info matching the bitness of the current thread's
   target description.  */

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
d0722149 1089
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1098
9eedd27d
TBA
/* Syscall catchpoints are supported on x86 GNU/Linux.  */

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}
1104
82075af2
JS
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  (Unlike what an older comment
   claimed, there is no separate syscall-return output here.)  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* Judge the inferior's bitness from the size of register 0 in its
     target description: 8 bytes means a 64-bit tdesc.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* The kernel exposes the syscall number in orig_rax.  */
      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1123
47f70aa7
TBA
/* Tracepoints are supported on this target.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
1129
fa593d66
PA
/* Write the LEN bytes in BUF into the inferior at *TO, then advance
   *TO past them.  NOTE(review): the result of target_write_memory is
   ignored here — a failed write leaves a partial jump pad; confirm
   callers tolerate that.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1136
/* Decode OP, a string of whitespace-separated hex byte values (e.g.
   "48 89 e6"), into BUF.  Return the number of bytes stored.  Parsing
   stops at the first token that is not a hex number.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  int count = 0;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* No progress means no more hex tokens.  */
      if (end == op)
	break;

      buf[count++] = byte;
      op = end;
    }

  return count;
}
1156
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if the pad or tracepoint is out of 32-bit-displacement
   range.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object: tracepoint pointer at
     (%rsp), thread area (%fs base) at 0x8(%rsp).  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* movabs <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Loops until the word at LOCKADDR is atomically
     changed from 0 to a pointer to our collecting_t.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* movabs $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, in reverse push order.  The first add
     discards the saved tpaddr; the final pop %rsp restores the
     original stack pointer.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1347
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) when no trampoline space is available for a 4-byte
   instruction.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  /* NOTE(review): stores through a cast pointer; buf is a char array
     so alignment is not guaranteed — works on x86 but relies on
     unaligned-store tolerance.  */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, in reverse push order.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1535
809a0c35
TBA
/* Fast tracepoints are supported on this target.  */

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}
1541
/* Dispatch jump-pad installation to the variant matching the bitness
   of the current thread's target description.  */

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1580
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  Returns 0 when the answer is not yet known (IPA not
   loaded).  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  /* Warn only once per gdbserver session.  */
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
1627
6a271cae
PA
/* Append the LEN bytes at START to the agent-expression code being
   compiled at current_insn_ptr, and advance current_insn_ptr.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1640
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Emit the instructions INSNS into the inferior's jump pad.  The asm
   block defines start_NAME/end_NAME labels around INSNS; the bytes
   between those labels in gdbserver's own text are copied via
   add_insns.  The leading jmp keeps the host from executing the
   copied instructions.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble INSNS as 32-bit code even in a
   64-bit gdbserver, for 32-bit inferiors.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1678
#ifdef __x86_64__

/* The amd64_emit_* routines below compile agent-expression bytecodes
   to native code.  Convention visible in the emitted sequences: the
   top of the expression stack lives in %rax, the rest is pushed on
   the machine stack.  */

static void
amd64_emit_prologue (void)
{
  /* Set up a frame; save the two incoming arguments (%rdi, %rsi) in
     the 0x20-byte local area.  */
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  /* Store the result (%rax) through the saved second argument, then
     return 0.  */
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  /* top-of-stack += next; pop next.  */
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  /* top-of-stack = next - top; pop.  */
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
1719
/* Multiply and shift bytecodes are not implemented for amd64;
   flag an agent-expression compile error instead.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend the low ARG bits of the top of stack to 64 bits.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1768
/* Logical NOT: top of stack becomes 1 if it was zero, else 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  /* top &= next; pop next.  */
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  /* top |= next; pop next.  */
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  /* top ^= next; pop next.  */
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  /* Bitwise complement of the top of stack.  */
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
1808
/* Comparison bytecodes: compare the second stack entry with the top,
   pop one entry, and leave 1/0 in %rax.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1850
/* Dereference the top of stack as a SIZE-byte memory reference,
   replacing the address in %rax with the loaded value.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Conditional branch on top of stack non-zero.  The last 6 bytes are
   a jne with a zero displacement; *OFFSET_P/*SIZE_P tell the caller
   where the 4-byte displacement to patch lives.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Unconditional branch; displacement patched later at offset 1.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
1899
1900static void
1901amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1902{
1903 int diff = (to - (from + size));
1904 unsigned char buf[sizeof (int)];
1905
1906 if (size != 4)
1907 {
1908 emit_error = 1;
1909 return;
1910 }
1911
1912 memcpy (buf, &diff, sizeof (int));
4196ab2a 1913 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
1914}
1915
/* Push the 64-bit constant NUM: emit "movabs $NUM,%rax" (the previous
   top of stack must already have been flushed by the caller).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1930
/* Emit a call to FN from the compiled code pad, using a 32-bit
   relative call when FN is near enough, otherwise an indirect call
   through a scratch register.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  /* Relative displacement is measured from the end of the 5-byte
     call instruction.  */
  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call; load the destination
	 into a call-clobbered scratch register and call indirectly.
	 NOTE(review): an earlier comment here said %r10, but
	 0x48 0xba encodes "movabs $fn,%rdx" and 0xff 0xd2 is
	 "callq *%rdx" (there is no REX.B prefix).  %rdx is also
	 call-clobbered and is not used as an argument register by
	 this emitter, so the code is still correct.  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1972
/* Fetch raw register REG: load the register number into %esi (the
   second argument) and call the IPA's get_raw_reg function.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Discard the top of stack, reloading %rax from memory.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Spill the cached top of stack (%rax) to the machine stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Zero-extend the low ARG bits of the top of stack.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Swap the top two stack entries.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
2036
/* Drop N entries from the expression stack (each entry is 8 bytes).  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Load ARG1 into the first argument register, then call FN; the
     result is left in %rax, the top-of-stack cache.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2100
df4a0200 2101static void
6b9801d4
SS
2102amd64_emit_eq_goto (int *offset_p, int *size_p)
2103{
2104 EMIT_ASM (amd64_eq,
2105 "cmp %rax,(%rsp)\n\t"
2106 "jne .Lamd64_eq_fallthru\n\t"
2107 "lea 0x8(%rsp),%rsp\n\t"
2108 "pop %rax\n\t"
2109 /* jmp, but don't trust the assembler to choose the right jump */
2110 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2111 ".Lamd64_eq_fallthru:\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2113 "pop %rax");
2114
2115 if (offset_p)
2116 *offset_p = 13;
2117 if (size_p)
2118 *size_p = 4;
2119}
2120
df4a0200 2121static void
6b9801d4
SS
2122amd64_emit_ne_goto (int *offset_p, int *size_p)
2123{
2124 EMIT_ASM (amd64_ne,
2125 "cmp %rax,(%rsp)\n\t"
2126 "je .Lamd64_ne_fallthru\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2128 "pop %rax\n\t"
2129 /* jmp, but don't trust the assembler to choose the right jump */
2130 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2131 ".Lamd64_ne_fallthru:\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax");
2134
2135 if (offset_p)
2136 *offset_p = 13;
2137 if (size_p)
2138 *size_p = 4;
2139}
2140
df4a0200 2141static void
6b9801d4
SS
2142amd64_emit_lt_goto (int *offset_p, int *size_p)
2143{
2144 EMIT_ASM (amd64_lt,
2145 "cmp %rax,(%rsp)\n\t"
2146 "jnl .Lamd64_lt_fallthru\n\t"
2147 "lea 0x8(%rsp),%rsp\n\t"
2148 "pop %rax\n\t"
2149 /* jmp, but don't trust the assembler to choose the right jump */
2150 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2151 ".Lamd64_lt_fallthru:\n\t"
2152 "lea 0x8(%rsp),%rsp\n\t"
2153 "pop %rax");
2154
2155 if (offset_p)
2156 *offset_p = 13;
2157 if (size_p)
2158 *size_p = 4;
2159}
2160
/* Emit a 64-bit signed "less or equal" conditional goto.  Same
   patching contract as amd64_emit_eq_goto.  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2180
/* Emit a 64-bit signed "greater than" conditional goto.  Same
   patching contract as amd64_emit_eq_goto.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2200
/* Emit a 64-bit signed "greater or equal" conditional goto.  Same
   patching contract as amd64_emit_eq_goto.  The .Lamd64_ge_jump
   label is unused (its siblings don't have one) but harmless.  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2221
/* Vector of bytecode-compilation emitters used for 64-bit (amd64)
   inferiors.  Entry order must match the emit_ops structure
   declaration.  */

static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2262
2263#endif /* __x86_64__ */
2264
/* Emit the 32-bit expression prologue: set up a frame and preserve
   %ebx, which the emitted code uses as the high half of the 64-bit
   top-of-stack value (%ebx:%eax).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp). */
}
2275
/* Emit the 32-bit expression epilogue: store the 64-bit result
   (%ebx:%eax) through the value pointer at 12(%ebp), return 0 in
   %eax, and tear down the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2288
/* Emit a 64-bit add: fold the next-on-stack operand into %ebx:%eax
   (add with carry into the high half) and drop it from the stack.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2297
/* Emit a 64-bit subtract: compute next-on-stack minus top-of-stack
   (with borrow into the high half) and pop the result into
   %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2307
/* 64-bit multiply is not implemented for 32-bit inferiors; flag the
   compilation as failed via emit_error.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2313
/* 64-bit left shift is not implemented for 32-bit inferiors; flag
   the compilation as failed via emit_error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2319
/* 64-bit arithmetic right shift is not implemented for 32-bit
   inferiors; flag the compilation as failed via emit_error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2325
/* 64-bit logical right shift is not implemented for 32-bit
   inferiors; flag the compilation as failed via emit_error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2331
/* Emit code to sign-extend the top-of-stack value from ARG bits to
   the full 64-bit %ebx:%eax pair.  Unsupported widths set
   emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2359
/* Emit a logical NOT: replace the 64-bit top of stack with 1 if it
   is zero, else 0.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2370
/* Emit a 64-bit bitwise AND of %ebx:%eax with the next-on-stack
   operand, which is popped.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2379
/* Emit a 64-bit bitwise OR of %ebx:%eax with the next-on-stack
   operand, which is popped.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2388
/* Emit a 64-bit bitwise XOR of %ebx:%eax with the next-on-stack
   operand, which is popped.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2397
/* Emit a 64-bit bitwise complement of the top of stack
   (%ebx:%eax).  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2405
/* Emit a 64-bit equality test: compare the next-on-stack operand
   with %ebx:%eax, pop it, and leave 1 in %ebx:%eax if equal, else
   0.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2423
/* Emit a 64-bit signed less-than test: high halves decide first,
   low halves break ties.  Leaves 1 in %ebx:%eax if next-on-stack is
   signed-less-than the old top, else 0; the operand is popped.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2442
/* Emit a 64-bit unsigned less-than test; structure mirrors
   i386_emit_less_signed but uses unsigned comparisons.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2461
/* Emit code to dereference the address held in %eax, loading SIZE
   bytes into the value register(s); the 8-byte case fills both
   halves of %ebx:%eax.  Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2486
/* Emit a conditional goto: pop the 64-bit top of stack and jump if
   it is non-zero.  The jne's 4-byte displacement is left zeroed for
   later patching; *OFFSET_P/*SIZE_P describe where it lives.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2504
/* Emit an unconditional goto whose 4-byte displacement (at offset 1,
   right after the 0xe9 opcode) is patched later.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2516
2517static void
2518i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2519{
2520 int diff = (to - (from + size));
2521 unsigned char buf[sizeof (int)];
2522
2523 /* We're only doing 4-byte sizes at the moment. */
2524 if (size != 4)
2525 {
2526 emit_error = 1;
2527 return;
2528 }
2529
2530 memcpy (buf, &diff, sizeof (int));
4196ab2a 2531 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2532}
2533
2534static void
4e29fb54 2535i386_emit_const (LONGEST num)
6a271cae
PA
2536{
2537 unsigned char buf[16];
b00ad6ff 2538 int i, hi, lo;
6a271cae
PA
2539 CORE_ADDR buildaddr = current_insn_ptr;
2540
2541 i = 0;
2542 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2543 lo = num & 0xffffffff;
2544 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2545 i += 4;
2546 hi = ((num >> 32) & 0xffffffff);
2547 if (hi)
2548 {
2549 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2550 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2551 i += 4;
2552 }
2553 else
2554 {
2555 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2556 }
2557 append_insns (&buildaddr, i, buf);
2558 current_insn_ptr = buildaddr;
2559}
2560
2561static void
2562i386_emit_call (CORE_ADDR fn)
2563{
2564 unsigned char buf[16];
2565 int i, offset;
2566 CORE_ADDR buildaddr;
2567
2568 buildaddr = current_insn_ptr;
2569 i = 0;
2570 buf[i++] = 0xe8; /* call <reladdr> */
2571 offset = ((int) fn) - (buildaddr + 5);
2572 memcpy (buf + 1, &offset, 4);
2573 append_insns (&buildaddr, 5, buf);
2574 current_insn_ptr = buildaddr;
2575}
2576
/* Emit code to fetch raw register number REG: pass REG and the raw
   register base address (8(%ebp), set up by the prologue) to the
   get_raw_reg helper and leave the zero-extended result in
   %ebx:%eax.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Reserve stack space for the helper's two arguments.  */
  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  /* Store the arguments and make the call.  */
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  /* Zero-extend the 32-bit result and release the argument space.  */
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2602
/* Emit code to pop the next 64-bit stack entry into %ebx:%eax.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2610
/* Emit code to push the current 64-bit top-of-stack value
   (%ebx:%eax) onto the machine stack.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2618
/* Emit code to zero-extend the top-of-stack value from ARG bits to
   the full 64-bit %ebx:%eax pair.  Unsupported widths set
   emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2642
/* Emit code to swap the top two 64-bit stack entries (%ebx:%eax and
   the pair on the machine stack).  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2654
/* Emit code to drop N 64-bit entries from the machine stack via
   "lea n*8(%esp),%esp".  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* NOTE(review): the displacement is emitted as a single byte, so
     this assumes n*8 fits in a signed 8-bit displacement -- confirm
     callers keep N small.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2670
/* Emit code to call FN with the single integer argument ARG1 and
   capture its 64-bit result (returned in %edx:%eax per the 32-bit
   ABI) into %ebx:%eax.  FN's prototype is `LONGEST(*fn)(int)'. */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  /* Move the high half of the result into place and release the
     argument space.  */
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2698
/* Emit code to call FN passing ARG1 and the current 64-bit
   top-of-stack value, preserving the top of stack across the call.
   FN's prototype is `void(*fn)(int,LONGEST)'. */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2734
6b9801d4 2735
/* Emit a 64-bit "equal" conditional goto for 32-bit inferiors:
   compare the saved operand with %ebx:%eax, pop both, and jump if
   equal.  The jmp's 4-byte displacement is zero-filled for later
   patching; *OFFSET_P/*SIZE_P report its location.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2760
/* Emit a 64-bit "not equal" conditional goto for 32-bit inferiors;
   same patching contract as i386_emit_eq_goto.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2786
/* Emit a 64-bit signed "less than" conditional goto for 32-bit
   inferiors: high halves decide first, low halves break ties.  Same
   patching contract as i386_emit_eq_goto.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2812
/* Emit a 64-bit signed "less or equal" conditional goto for 32-bit
   inferiors.  Same patching contract as i386_emit_eq_goto.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2838
/* Emit a 64-bit signed "greater than" conditional goto for 32-bit
   inferiors.  Same patching contract as i386_emit_eq_goto.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2864
/* Emit a 64-bit signed "greater or equal" conditional goto for
   32-bit inferiors.  Same patching contract as i386_emit_eq_goto.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Offset of the 4-byte jmp displacement above.  */
  if (size_p)
    *size_p = 4;
}
2890
/* Vector of bytecode-compilation emitters used for 32-bit (i386)
   inferiors.  Entry order must match the emit_ops structure
   declaration.  */

static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2931
2932
/* Implementation of target ops method "emit_ops".  Return the
   emitter vector matching the inferior's word size, as determined
   from its target description.  */

emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
2943
/* Implementation of target ops method "sw_breakpoint_from_kind".
   KIND is ignored -- a single software breakpoint sequence is used
   for all kinds.  Return its bytes and store its length in *SIZE.  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2952
/* Implementation of linux target ops method
   "low_supports_range_stepping".  Range stepping is always
   supported on x86.  */

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
2958
/* Implementation of target ops method "get_ipa_tdesc_idx".  Return
   the index identifying the current thread's target description to
   the in-process agent.  */

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  /* NOTE(review): on 64-bit builds this returns unconditionally;
     presumably amd64_get_ipa_tdesc_idx also copes with 32-bit
     descriptions -- verify.  */
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  /* The fallback no-XML description maps to the plain SSE index.  */
  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2974
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
2978
/* One-time architecture initialization: build the fallback "no XML"
   target descriptions (presumably used for clients that cannot
   process XML target descriptions -- they are SSE-mask copies with
   a fixed xmltarget string) and register the regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.101674 seconds and 4 git commands to generate.