/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

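/* Jump instruction templates, used below when building jump pads; the
   zeroed displacement fields are patched in later.  jump_insn is the
   5-byte "jmp rel32" (opcode 0xe9); small_jump_insn is the
   operand-size-prefixed 4-byte "jmp rel16" (0x66 0xe9), used when only
   a 4-byte instruction can be overwritten at the tracepoint.  */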
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
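
/* An illustrative sketch (deliberately not compiled) of how a
   libthread_db-style client would use the callback above to find a
   thread's TLS base on a 64-bit inferior.  PH is assumed to be a
   valid prochandle for an attached, stopped process.  */
#if 0
static void
example_tls_base (const struct ps_prochandle *ph, lwpid_t lwpid)
{
  void *base;

  /* On x86-64 the FS segment register holds the TLS base.  */
  if (ps_get_thread_area (ph, lwpid, FS, &base) == PS_OK)
    printf ("TLS base of LWP %ld is %p\n", (long) lwpid, base);
}
#endif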

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
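
/* Note on the i386 path above: the low three bits of a segment
   selector are the requested privilege level and table indicator, so
   shifting the %gs value right by reg_thread_area (3) yields the GDT
   descriptor index that PTRACE_GET_THREAD_AREA expects.  E.g., a %gs
   of 0x33 names GDT entry 6; desc[1] then holds that entry's base
   address.  */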


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent on
     fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end, before the forked-off process is detached, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (native, (gdb_byte *) inf,
                                             direction, FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (native, (gdb_byte *) inf,
                                             direction, FIXUP_X32);
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471) are the OS
  enabled extended state mask, which is the same as the extended control
  register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use
  this mask together with the mask saved in the xstate_hdr_bytes to
  determine what states the processor/OS supports and what state, used
  or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
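
/* An illustrative sketch (deliberately not compiled) of pulling the
   XCR0 mask out of the XSAVE area with PTRACE_GETREGSET, per the
   layout described above; TID is assumed to be a stopped, traced
   thread.  x86_linux_read_description below does exactly this.  */
#if 0
static uint64_t
example_read_xcr0 (int tid)
{
  uint64_t xstateregs[X86_XSTATE_SSE_SIZE / sizeof (uint64_t)];
  struct iovec iov = { xstateregs, sizeof (xstateregs) };

  if (ptrace (PTRACE_GETREGSET, tid, (unsigned int) NT_X86_XSTATE,
              (long) &iov) < 0)
    return 0;

  /* Bytes 464..471 of the sw_usable_bytes area hold the mask.  */
  return xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
}
#endif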

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);
          char *p;

          for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno, int *sysret)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;
      long l_sysret;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      collect_register_by_name (regcache, "rax", &l_sysret);
      *sysno = (int) l_sysno;
      *sysret = (int) l_sysret;
    }
  else
    {
      collect_register_by_name (regcache, "orig_eax", sysno);
      collect_register_by_name (regcache, "eax", sysret);
    }
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
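
/* For example, push_opcode (buf, "48 89 e6") stores the three opcode
   bytes of "mov %rsp,%rsi" into BUF and returns 3; the jump pad
   builders below append such fragments with append_insns.  */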

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

1187 >31-bits away off the jump pad. */
1188 i = 0;
1189 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1190 memcpy (buf + i, &collector, 8);
1191 i += 8;
1192 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1193 append_insns (&buildaddr, i, buf);
1194
1195 /* Clear the spin-lock. */
1196 i = 0;
1197 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1198 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1199 memcpy (buf + i, &lockaddr, 8);
1200 i += 8;
1201 append_insns (&buildaddr, i, buf);
1202
1203 /* Remove stack that had been used for the collect_t object. */
1204 i = 0;
1205 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1206 append_insns (&buildaddr, i, buf);
1207
1208 /* Restore register state. */
1209 i = 0;
1210 buf[i++] = 0x48; /* add $0x8,%rsp */
1211 buf[i++] = 0x83;
1212 buf[i++] = 0xc4;
1213 buf[i++] = 0x08;
1214 buf[i++] = 0x9d; /* popfq */
1215 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1216 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1217 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1218 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1219 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1220 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1221 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1222 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1223 buf[i++] = 0x58; /* pop %rax */
1224 buf[i++] = 0x5b; /* pop %rbx */
1225 buf[i++] = 0x59; /* pop %rcx */
1226 buf[i++] = 0x5a; /* pop %rdx */
1227 buf[i++] = 0x5e; /* pop %rsi */
1228 buf[i++] = 0x5f; /* pop %rdi */
1229 buf[i++] = 0x5d; /* pop %rbp */
1230 buf[i++] = 0x5c; /* pop %rsp */
1231 append_insns (&buildaddr, i, buf);
1232
1233 /* Now, adjust the original instruction to execute in the jump
1234 pad. */
1235 *adjusted_insn_addr = buildaddr;
1236 relocate_instruction (&buildaddr, tpaddr);
1237 *adjusted_insn_addr_end = buildaddr;
1238
1239 /* Finally, write a jump back to the program. */
f4647387
YQ
1240
1241 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1242 if (loffset > INT_MAX || loffset < INT_MIN)
1243 {
1244 sprintf (err,
1245 "E.Jump back from jump pad too far from tracepoint "
1246 "(offset 0x%" PRIx64 " > int32).", loffset);
1247 return 1;
1248 }
1249
1250 offset = (int) loffset;
fa593d66
PA
1251 memcpy (buf, jump_insn, sizeof (jump_insn));
1252 memcpy (buf + 1, &offset, 4);
1253 append_insns (&buildaddr, sizeof (jump_insn), buf);
1254
1255 /* The jump pad is now built. Wire in a jump to our jump pad. This
1256 is always done last (by our caller actually), so that we can
1257 install fast tracepoints with threads running. This relies on
1258 the agent's atomic write support. */
f4647387
YQ
1259 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1260 if (loffset > INT_MAX || loffset < INT_MIN)
1261 {
1262 sprintf (err,
1263 "E.Jump pad too far from tracepoint "
1264 "(offset 0x%" PRIx64 " > int32).", loffset);
1265 return 1;
1266 }
1267
1268 offset = (int) loffset;
1269
fa593d66
PA
1270 memcpy (buf, jump_insn, sizeof (jump_insn));
1271 memcpy (buf + 1, &offset, 4);
1272 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1273 *jjump_pad_insn_size = sizeof (jump_insn);
1274
1275 /* Return the end address of our pad. */
1276 *jump_entry = buildaddr;
1277
1278 return 0;
1279}
1280
1281#endif /* __x86_64__ */
1282
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline,
                                                   trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":"); \
    } while (0)
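
/* For reference, EMIT_ASM (foo, "pop %rax") expands to roughly:

     extern unsigned char start_foo, end_foo;
     add_insns (&start_foo, &end_foo - &start_foo);
     __asm__ ("jmp end_foo\n"
              "\t" "start_foo:"
              "\t" "pop %rax\n"
              "\t" "end_foo:");

   so the instruction bytes are assembled into this function's body,
   jumped over when GDBserver itself runs, and copied out into the
   inferior by add_insns.  */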

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
               "\t" "jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":\n" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":\n" \
               ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a near call.  Use an indirect callq
         through a register instead; %rdx (which the bytes below
         encode) is call-clobbered, so we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
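
/* Worked example with illustrative numbers: with current_insn_ptr at
   0x1000 and FN at 0x2000, offset64 is 0x2000 - 0x1005 = 0xffb, which
   fits in 32 bits, so the 5-byte near call "e8 fb 0f 00 00" is
   emitted; an FN more than 2 GiB away takes the indirect-call path
   instead.  */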
1898
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* The signed 8-bit displacement only handles adjustments of up to
     15 stack slots (15 * 8 = 120 bytes), but we don't expect any
     more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

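  /* cmp (4 bytes) + jne (2) + lea (5) + pop (1) come to 12, plus one
     byte for the 0xe9 opcode: the displacement starts at offset 13.
     The same accounting holds for the other comparison gotos
     below.  */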
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}


struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

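/* The 32-bit emitters keep the 64-bit top of stack split across the
   %ebx:%eax pair (high:low); deeper entries live on the hardware
   stack as pairs of 32-bit words, low word at the lower address.  */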
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              /* High halves equal: the low halves decide, and they
                 compare unsigned (this used a signed jl before, which
                 gave wrong answers when only one low word had its top
                 bit set).  */
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

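  /* mov (2 bytes) + or (2) + the two pops (1 each) + cmp (3) + the
     two jne opcode bytes add up to 11.  */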
  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

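/* Load the 64-bit constant NUM as two 32-bit immediates; when the
   high half is zero, a two-byte xor of %ebx is shorter than a
   five-byte mov of zero.  */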
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

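/* Drop N 64-bit entries (8 bytes each) from the stack.  As on amd64,
   the signed 8-bit displacement caps N at 15, which is plenty
   here.  */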
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

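/* 64-bit comparison gotos.  eq and ne check the low halves first,
   since those are the more likely deciders; the ordered comparisons
   must look at the high halves first, and only when the high halves
   are equal do the low halves decide, comparing unsigned.  */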
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

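  /* Two cmp/jne pairs (3 + 2 and 4 + 2 bytes), the lea (4) and the
     two pops (1 each) come to 17, plus one byte for the 0xe9 opcode:
     the displacement starts at offset 18.  */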
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              /* High halves equal: the low halves compare unsigned
                 (was the signed jnl).  */
              "cmpl %eax,(%esp)\n\t"
              "jnb .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              /* A strict jl here: equal high halves must fall through
                 to the low-half check (the old jle jumped without
                 looking at the low halves at all).  */
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              /* High halves equal: the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jnbe .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              /* High halves equal: the low halves compare unsigned
                 (was the signed jng).  */
              "cmpl %eax,(%esp)\n\t"
              "jna .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              /* A strict jg here: equal high halves must fall through
                 to the low-half check (the old jge jumped without
                 looking at the low halves at all).  */
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              /* High halves equal: the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jb .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

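/* Pick the emit_ops matching the inferior: a 64-bit gdbserver may be
   serving a 32-bit inferior, so the choice has to be made at run time
   from the current target description.  */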
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
};

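/* Register every target description this gdbserver build can serve,
   and set up the fallback descriptions used when the connecting GDB
   cannot process XML target descriptions.  */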
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}