Linux x86 low-level debug register comment synchronization
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
32d0add0 3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265
GB
26#include "x86-low.h"
27#include "x86-xstate.h"
d0722149
DE
28
29#include "gdb_proc_service.h"
b5737fa9
PA
30/* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32#ifndef ELFMAG0
33#include "elf/common.h"
34#endif
35
58b4daa5 36#include "agent.h"
3aee8918 37#include "tdesc.h"
c144c7a0 38#include "tracepoint.h"
f699aaba 39#include "ax.h"
7b669087 40#include "nat/linux-nat.h"
4b134ca1 41#include "nat/x86-linux.h"
d0722149 42
3aee8918 43#ifdef __x86_64__
90884b2b
L
44/* Defined in auto-generated file amd64-linux.c. */
45void init_registers_amd64_linux (void);
3aee8918
PA
46extern const struct target_desc *tdesc_amd64_linux;
47
1570b33e
L
48/* Defined in auto-generated file amd64-avx-linux.c. */
49void init_registers_amd64_avx_linux (void);
3aee8918
PA
50extern const struct target_desc *tdesc_amd64_avx_linux;
51
01f9f808
MS
52/* Defined in auto-generated file amd64-avx512-linux.c. */
53void init_registers_amd64_avx512_linux (void);
54extern const struct target_desc *tdesc_amd64_avx512_linux;
55
a196ebeb
WT
56/* Defined in auto-generated file amd64-mpx-linux.c. */
57void init_registers_amd64_mpx_linux (void);
58extern const struct target_desc *tdesc_amd64_mpx_linux;
59
4d47af5c
L
60/* Defined in auto-generated file x32-linux.c. */
61void init_registers_x32_linux (void);
3aee8918
PA
62extern const struct target_desc *tdesc_x32_linux;
63
4d47af5c
L
64/* Defined in auto-generated file x32-avx-linux.c. */
65void init_registers_x32_avx_linux (void);
3aee8918 66extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 67
01f9f808
MS
68/* Defined in auto-generated file x32-avx512-linux.c. */
69void init_registers_x32_avx512_linux (void);
70extern const struct target_desc *tdesc_x32_avx512_linux;
71
3aee8918
PA
72#endif
73
74/* Defined in auto-generated file i386-linux.c. */
75void init_registers_i386_linux (void);
76extern const struct target_desc *tdesc_i386_linux;
77
78/* Defined in auto-generated file i386-mmx-linux.c. */
79void init_registers_i386_mmx_linux (void);
80extern const struct target_desc *tdesc_i386_mmx_linux;
81
82/* Defined in auto-generated file i386-avx-linux.c. */
83void init_registers_i386_avx_linux (void);
84extern const struct target_desc *tdesc_i386_avx_linux;
85
01f9f808
MS
86/* Defined in auto-generated file i386-avx512-linux.c. */
87void init_registers_i386_avx512_linux (void);
88extern const struct target_desc *tdesc_i386_avx512_linux;
89
a196ebeb
WT
90/* Defined in auto-generated file i386-mpx-linux.c. */
91void init_registers_i386_mpx_linux (void);
92extern const struct target_desc *tdesc_i386_mpx_linux;
93
3aee8918
PA
#ifdef __x86_64__
/* Fallback target description handed to GDBs that lack XML support
   (64-bit flavor); filled in at arch-setup time.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
/* Likewise, the 32-bit fallback target description.  */
static struct target_desc *tdesc_i386_linux_no_xml;


/* 5-byte "jmp rel32" (opcode 0xe9), used when installing fast
   tracepoint jump pads; the displacement is patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* 4-byte 16-bit-displacement jump (0x66 operand-size prefix + 0xe9),
   for tracepoints too close to use a rel32 jump.  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
116
117#include <sys/reg.h>
118#include <sys/procfs.h>
119#include <sys/ptrace.h>
1570b33e
L
120#include <sys/uio.h>
121
122#ifndef PTRACE_GETREGSET
123#define PTRACE_GETREGSET 0x4204
124#endif
125
126#ifndef PTRACE_SETREGSET
127#define PTRACE_SETREGSET 0x4205
128#endif
129
d0722149
DE
130
131#ifndef PTRACE_GET_THREAD_AREA
132#define PTRACE_GET_THREAD_AREA 25
133#endif
134
135/* This definition comes from prctl.h, but some kernels may not have it. */
136#ifndef PTRACE_ARCH_PRCTL
137#define PTRACE_ARCH_PRCTL 30
138#endif
139
140/* The following definitions come from prctl.h, but may be absent
141 for certain configurations. */
142#ifndef ARCH_GET_FS
143#define ARCH_SET_GS 0x1001
144#define ARCH_SET_FS 0x1002
145#define ARCH_GET_FS 0x1003
146#define ARCH_GET_GS 0x1004
147#endif
148
aa5ca48f
DE
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Local mirror of the inferior's DR0..DR7 debug registers; flushed
     to the real registers in x86_linux_prepare_to_resume.  */
  struct x86_debug_reg_state debug_reg_state;
};
155
d0722149
DE
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

/* Byte offsets into `struct user' for each GDB register number; -1
   marks registers not transferred through the GETREGS layout (they go
   through other regsets instead).  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
/* Number of `struct user' GP/segment registers (GS is the last one
   mapped above).  */
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
3aee8918
PA
222
223#ifdef __x86_64__
224
225/* Returns true if the current inferior belongs to a x86-64 process,
226 per the tdesc. */
227
228static int
229is_64bit_tdesc (void)
230{
0bfdf32f 231 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
232
233 return register_size (regcache->tdesc, 0) == 8;
234}
235
236#endif
237
d0722149
DE

/* Called by libthread_db.  Look up the thread-local storage base
   address of thread LWPID: on 64-bit inferiors via ARCH_GET_FS/GS,
   otherwise via the GDT entry IDX.  Stores the base in *BASE.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferiors address TLS through FS/GS directly; IDX here
	 is the register, not a GDT index.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit path: fetch the 4-word segment descriptor for entry IDX;
       desc[1] is the segment base in the kernel's layout.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
279
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* The GDT entry index is encoded in the upper bits of %gs; shift
       out the low 3 bits (RPL/TI) to recover it.  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] holds the segment base in the kernel's descriptor
       layout.  */
    *addr = desc[1];
    return 0;
  }
}
326
327
d0722149
DE
328\f
329static int
3aee8918 330x86_cannot_store_register (int regno)
d0722149 331{
3aee8918
PA
332#ifdef __x86_64__
333 if (is_64bit_tdesc ())
334 return 0;
335#endif
336
d0722149
DE
337 return regno >= I386_NUM_REGS;
338}
339
340static int
3aee8918 341x86_cannot_fetch_register (int regno)
d0722149 342{
3aee8918
PA
343#ifdef __x86_64__
344 if (is_64bit_tdesc ())
345 return 0;
346#endif
347
d0722149
DE
348 return regno >= I386_NUM_REGS;
349}
350
/* Copy the general-purpose registers out of REGCACHE into the
   ptrace GETREGS buffer BUF, using the regmap offsets.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax has no regmap slot; it lives at a fixed offset.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
376
/* Copy the general-purpose registers from the ptrace GETREGS buffer
   BUF into REGCACHE (inverse of x86_fill_gregset).  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax has no regmap slot; it lives at a fixed offset.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
398
/* Copy the FPU registers out of REGCACHE into the ptrace FP buffer
   BUF: fxsave layout on amd64, plain fsave layout on i386.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
408
/* Copy the FPU registers from the ptrace FP buffer BUF into REGCACHE
   (inverse of x86_fill_fpregset).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
418
#ifndef __x86_64__

/* Copy the extended FP/SSE registers out of REGCACHE into the
   FPXREGS (fxsave-layout) buffer BUF.  i386 only.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Copy the extended FP/SSE registers from BUF into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
434
1570b33e
L
/* Copy the xsave state (x87/SSE/AVX/...) out of REGCACHE into the
   PTRACE_GETREGSET NT_X86_XSTATE buffer BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Copy the xsave state from BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
446
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

/* Table of ptrace regsets this target can transfer; terminated by an
   all-zero/-1 sentinel entry.  */
static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* Size 0 here: the xstate regset size is presumably determined at
     runtime — TODO confirm against the arch-setup code.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
475
476static CORE_ADDR
442ea881 477x86_get_pc (struct regcache *regcache)
d0722149 478{
3aee8918 479 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
480
481 if (use_64bit)
482 {
483 unsigned long pc;
442ea881 484 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
485 return (CORE_ADDR) pc;
486 }
487 else
488 {
489 unsigned int pc;
442ea881 490 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
491 return (CORE_ADDR) pc;
492 }
493}
494
495static void
442ea881 496x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 497{
3aee8918 498 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
499
500 if (use_64bit)
501 {
502 unsigned long newpc = pc;
442ea881 503 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
504 }
505 else
506 {
507 unsigned int newpc = pc;
442ea881 508 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
509 }
510}
511\f
512static const unsigned char x86_breakpoint[] = { 0xCC };
513#define x86_breakpoint_len 1
514
515static int
516x86_breakpoint_at (CORE_ADDR pc)
517{
518 unsigned char c;
519
fc7238bb 520 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
521 if (c == 0xCC)
522 return 1;
523
524 return 0;
525}
526\f
4180215b
PA
527
/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  size_t slot_size = sizeof (((struct user *) 0)->u_debugreg[0]);

  return offsetof (struct user, u_debugreg) + slot_size * regnum;
}
537
538
aa5ca48f
DE
/* Support for debug registers.  */

/* Get debug register REGNUM value from the LWP specified by PTID.
   Errors out via perror_with_name on failure.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  gdb_assert (ptid_lwp_p (ptid));
  tid = ptid_get_lwp (ptid);

  /* PTRACE_PEEKUSER returns the value in-band, so errors can only be
     detected by clearing and re-checking errno.  */
  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    perror_with_name (_("Couldn't read debug register"));

  return value;
}
559
14b0bc68
GB
/* Set debug register REGNUM to VALUE in the LWP specified by PTID.
   Errors out via perror_with_name on failure.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  gdb_assert (ptid_lwp_p (ptid));
  tid = ptid_get_lwp (ptid);

  /* Check errno explicitly; PTRACE_POKEUSER's return value alone is
     not examined here.  */
  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    perror_with_name (_("Couldn't write debug register"));
}
575
14b0bc68
GB
/* Callback for iterate_over_lwps.  Mark that our local mirror of
   LWP's debug registers has been changed, and cause LWP to stop if
   it isn't already.  Values are written from our local mirror to
   the actual debug registers immediately prior to LWP resuming.  */

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  lwp_set_debug_registers_changed (lwp, 1);

  /* Force a stop so the pending debug-register update is applied at
     the next resume.  */
  if (lwp_is_stopped (lwp) == 0)
    linux_stop_lwp (lwp);

  return 0;	/* Continue the iteration.  */
}
592
14b0bc68
GB
/* Store ADDR in debug register REGNUM of all LWPs of the current
   inferior.  Note that ADDR is not written here directly: the debug
   register mirror was already updated by the caller, and this merely
   marks every LWP stale so x86_linux_prepare_to_resume flushes the
   mirror before the LWP next runs.  */

static void
x86_linux_dr_set_addr (int regnum, CORE_ADDR addr)
{
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}
aa5ca48f 605
14b0bc68
GB
606/* Return the address stored in the current inferior's debug register
607 REGNUM. */
aa5ca48f 608
42995dbd 609static CORE_ADDR
d33472ad 610x86_linux_dr_get_addr (int regnum)
964e4306 611{
0a5b1e09 612 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306 613
7b669087 614 return x86_linux_dr_get (current_lwp_ptid (), regnum);
aa5ca48f
DE
615}
616
14b0bc68
GB
/* Store CONTROL in the debug control registers of all LWPs of the
   current inferior.  As with x86_linux_dr_set_addr, the mirror holds
   the new value already; this only marks each LWP so the write
   happens in x86_linux_prepare_to_resume.  */

static void
x86_linux_dr_set_control (unsigned long control)
{
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}
aa5ca48f 627
14b0bc68
GB
628/* Return the value stored in the current inferior's debug control
629 register. */
964e4306 630
42995dbd 631static unsigned long
d33472ad 632x86_linux_dr_get_control (void)
964e4306 633{
7b669087 634 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
aa5ca48f
DE
635}
636
14b0bc68
GB
637/* Return the value stored in the current inferior's debug status
638 register. */
aa5ca48f 639
42995dbd 640static unsigned long
d33472ad 641x86_linux_dr_get_status (void)
aa5ca48f 642{
7b669087 643 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
aa5ca48f 644}
42995dbd
GB
645
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,	/* set_control */
    x86_linux_dr_set_addr,	/* set_addr */
    x86_linux_dr_get_addr,	/* get_addr */
    x86_linux_dr_get_status,	/* get_status */
    x86_linux_dr_get_control,	/* get_control */
    sizeof (void *),		/* debug register size, in bytes */
  };
aa5ca48f 656\f
90d74c30 657/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
658
659static int
802e8e6d
PA
660x86_supports_z_point_type (char z_type)
661{
662 switch (z_type)
663 {
664 case Z_PACKET_SW_BP:
665 case Z_PACKET_HW_BP:
666 case Z_PACKET_WRITE_WP:
667 case Z_PACKET_ACCESS_WP:
668 return 1;
669 default:
670 return 0;
671 }
672}
673
/* Insert a breakpoint or watchpoint of TYPE at ADDR covering SIZE
   bytes.  Returns 0 on success; nonzero on failure or when TYPE is
   unsupported.  */

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	/* Hardware break/watchpoints go through the shared debug
	   register mirror rather than touching DR0..DR7 directly.  */
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
702
/* Remove a breakpoint or watchpoint of TYPE at ADDR covering SIZE
   bytes (inverse of x86_insert_point).  Returns 0 on success;
   nonzero on failure or when TYPE is unsupported.  */

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	/* Release the debug-register slot via the shared mirror.  */
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
730
731static int
732x86_stopped_by_watchpoint (void)
733{
734 struct process_info *proc = current_process ();
fe978cb0 735 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
736}
737
738static CORE_ADDR
739x86_stopped_data_address (void)
740{
741 struct process_info *proc = current_process ();
742 CORE_ADDR addr;
fe978cb0 743 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 744 &addr))
aa5ca48f
DE
745 return addr;
746 return 0;
747}
748\f
749/* Called when a new process is created. */
750
751static struct arch_process_info *
752x86_linux_new_process (void)
753{
ed859da7 754 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 755
df7e5265 756 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
757
758 return info;
759}
760
/* Called when a new thread is detected.  Mark its debug registers
   stale so the process-wide mirror is flushed to it before it first
   resumes.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  lwp_set_debug_registers_changed (lwp, 1);
}
768
70a0bb6b
GB
769/* See nat/x86-dregs.h. */
770
771struct x86_debug_reg_state *
772x86_debug_reg_state (pid_t pid)
773{
774 struct process_info *proc = find_process_pid (pid);
775
776 return &proc->priv->arch_private->debug_reg_state;
777}
778
14b0bc68
GB
/* Called prior to resuming a thread.  Updates the thread's debug
   registers if the values in our local mirror have been changed.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of_lwp (lwp);
  int clear_status = 0;

  if (lwp_debug_registers_changed (lwp))
    {
      struct x86_debug_reg_state *state
	= x86_debug_reg_state (ptid_get_pid (ptid));
      int i;

      /* Prior to Linux kernel 2.6.33 commit
	 72f674d203cd230426437cdcf7dd6f681dad8b0d, setting DR0-3 to
	 a value that did not match what was enabled in DR_CONTROL
	 resulted in EINVAL.  To avoid this we zero DR_CONTROL before
	 writing address registers, only writing DR_CONTROL's actual
	 value once all the addresses are in place.  */
      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       has made to its debug registers needs to be discarded
	       to avoid x86_stopped_data_address getting confused.  */
	    clear_status = 1;
	  }

      /* If DR_CONTROL is supposed to be zero then it's already set.  */
      if (state->dr_control_mirror != 0)
	x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp_set_debug_registers_changed (lwp, 0);
    }

  /* Also clear the status register after a watchpoint stop, so a
     stale hit isn't reported again on the next stop.  */
  if (clear_status
      || lwp_stop_reason (lwp) == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
824\f
d0722149
DE
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

/* 32-bit ABI equivalents of the native 64-bit types: ints and 32-bit
   pointers-as-integers.  */
typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;
855
/* Layout-compatible image of a 32-bit userspace siginfo_t; the _pad
   array fixes the union's size at 128 bytes total, matching the
   kernel's SI_MAX_SIZE layout.  */
typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
913
c92b5177
L
/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

/* Like compat_siginfo, but matching the x32 ABI: identical except for
   the 64-bit (4-byte-aligned) clock_t fields in _sigchld.  */
typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
974
d0722149
DE
/* Shorthand accessors into the compat siginfo unions, mirroring the
   glibc si_* convenience macros.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
995
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO.  Which union members are live depends on si_code and
   si_signo, so the copy is dispatched the same way the kernel fills
   the structure.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code means a userspace-generated signal.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1051
/* Convert the 32-bit compat siginfo FROM into the native 64-bit
   siginfo TO (inverse of compat_siginfo_from_siginfo).  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code means a userspace-generated signal.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1107
c92b5177
L
1108static void
1109compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1110 siginfo_t *from)
1111{
1112 memset (to, 0, sizeof (*to));
1113
1114 to->si_signo = from->si_signo;
1115 to->si_errno = from->si_errno;
1116 to->si_code = from->si_code;
1117
1118 if (to->si_code == SI_TIMER)
1119 {
1120 to->cpt_si_timerid = from->si_timerid;
1121 to->cpt_si_overrun = from->si_overrun;
1122 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1123 }
1124 else if (to->si_code == SI_USER)
1125 {
1126 to->cpt_si_pid = from->si_pid;
1127 to->cpt_si_uid = from->si_uid;
1128 }
1129 else if (to->si_code < 0)
1130 {
1131 to->cpt_si_pid = from->si_pid;
1132 to->cpt_si_uid = from->si_uid;
1133 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1134 }
1135 else
1136 {
1137 switch (to->si_signo)
1138 {
1139 case SIGCHLD:
1140 to->cpt_si_pid = from->si_pid;
1141 to->cpt_si_uid = from->si_uid;
1142 to->cpt_si_status = from->si_status;
1143 to->cpt_si_utime = from->si_utime;
1144 to->cpt_si_stime = from->si_stime;
1145 break;
1146 case SIGILL:
1147 case SIGFPE:
1148 case SIGSEGV:
1149 case SIGBUS:
1150 to->cpt_si_addr = (intptr_t) from->si_addr;
1151 break;
1152 case SIGPOLL:
1153 to->cpt_si_band = from->si_band;
1154 to->cpt_si_fd = from->si_fd;
1155 break;
1156 default:
1157 to->cpt_si_pid = from->si_pid;
1158 to->cpt_si_uid = from->si_uid;
1159 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1160 break;
1161 }
1162 }
1163}
1164
1165static void
1166siginfo_from_compat_x32_siginfo (siginfo_t *to,
1167 compat_x32_siginfo_t *from)
1168{
1169 memset (to, 0, sizeof (*to));
1170
1171 to->si_signo = from->si_signo;
1172 to->si_errno = from->si_errno;
1173 to->si_code = from->si_code;
1174
1175 if (to->si_code == SI_TIMER)
1176 {
1177 to->si_timerid = from->cpt_si_timerid;
1178 to->si_overrun = from->cpt_si_overrun;
1179 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1180 }
1181 else if (to->si_code == SI_USER)
1182 {
1183 to->si_pid = from->cpt_si_pid;
1184 to->si_uid = from->cpt_si_uid;
1185 }
1186 else if (to->si_code < 0)
1187 {
1188 to->si_pid = from->cpt_si_pid;
1189 to->si_uid = from->cpt_si_uid;
1190 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1191 }
1192 else
1193 {
1194 switch (to->si_signo)
1195 {
1196 case SIGCHLD:
1197 to->si_pid = from->cpt_si_pid;
1198 to->si_uid = from->cpt_si_uid;
1199 to->si_status = from->cpt_si_status;
1200 to->si_utime = from->cpt_si_utime;
1201 to->si_stime = from->cpt_si_stime;
1202 break;
1203 case SIGILL:
1204 case SIGFPE:
1205 case SIGSEGV:
1206 case SIGBUS:
1207 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1208 break;
1209 case SIGPOLL:
1210 to->si_band = from->cpt_si_band;
1211 to->si_fd = from->cpt_si_fd;
1212 break;
1213 default:
1214 to->si_pid = from->cpt_si_pid;
1215 to->si_uid = from->cpt_si_uid;
1216 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1217 break;
1218 }
1219 }
1220}
1221
d0722149
DE
1222#endif /* __x86_64__ */
1223
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  /* Whether the inferior's executable is 64-bit ELF; MACHINE receives
     its ELF machine number (EM_386, EM_X86_64, ...).  */
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      /* The two layouts must be the same size for ptrace's
	 PTRACE_GETSIGINFO/SETSIGINFO buffer to be reinterpreted.  */
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* x32 inferior under a 64-bit gdbserver: use the x32 compat
	 layout instead.  */
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  /* 32-bit gdbserver, or 64-bit inferior: layouts already match.  */
  return 0;
}
1268\f
1570b33e
L
/* Nonzero if the connected GDB announced (via the qSupported
   "xmlRegisters=i386" feature) that it understands x86 XML target
   descriptions; see x86_linux_process_qsupported.  */
static int use_xml;

/* Format of XSAVE extended state is:
 	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   Tri-state: -1 = not yet probed, 0 = unsupported, 1 = supported
   (probed in x86_linux_read_description).  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  Same tri-state
   convention as above.  */
static int have_ptrace_getregset = -1;
1305
/* Get Linux/x86 target description from running target.

   Probes the inferior (via ptrace) and the negotiated protocol
   features to pick the most specific pre-built target description:
   i386 vs amd64 vs x32, and plain/AVX/MPX/AVX512 register sets.
   Caches the PTRACE_GETREGSET and XCR0 probe results in file-scope
   statics so the kernel is only asked once.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* static: XCR0 is probed only once (when have_ptrace_getregset is
     still -1) but read on every call.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot control a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe for pre-SSE processors: if PTRACE_GETFPXREGS fails, fall
     back to the MMX-only description.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  /* GDB did not announce XML support: use the fixed no-xml
     descriptions and assume a plain SSE register set.  */
  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* One-time probe for PTRACE_GETREGSET and the XCR0 feature mask.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	     regset to match the enabled features; disable the other
	     non-general regsets it supersedes.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  /* 64-bit (LP64) inferior.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  /* x32 inferior (EM_X86_64 but 32-bit ELF).  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      /* 32-bit i386 inferior.  */
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  /* Only reachable for EM_X86_64 on a 32-bit build, which the checks
     above already rejected with error().  */
  gdb_assert_not_reached ("failed to return tdesc");
}
1470
1471/* Callback for find_inferior. Stops iteration when a thread with a
1472 given PID is found. */
1473
1474static int
1475same_process_callback (struct inferior_list_entry *entry, void *data)
1476{
1477 int pid = *(int *) data;
1478
1479 return (ptid_get_pid (entry->id) == pid);
1480}
1481
/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this processes.  */
  /* NOTE: this assigns the global current_thread, which arch_setup
     presumably reads to identify the process being set up; the caller
     (x86_linux_update_xmltarget) saves and restores it.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}
1497
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  /* The per-process callback clobbers current_thread; save it so we
     can restore it afterwards.  */
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}
1515
1516/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1517 PTRACE_GETREGSET. */
1518
1519static void
1520x86_linux_process_qsupported (const char *query)
1521{
1522 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1523 with "i386" in qSupported query, it supports x86 XML target
1524 descriptions. */
1525 use_xml = 0;
61012eef 1526 if (query != NULL && startswith (query, "xmlRegisters="))
1570b33e
L
1527 {
1528 char *copy = xstrdup (query + 13);
1529 char *p;
1530
1531 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1532 {
1533 if (strcmp (p, "i386") == 0)
1534 {
1535 use_xml = 1;
1536 break;
1537 }
1538 }
1539
1540 free (copy);
1541 }
1542
1543 x86_linux_update_xmltarget ();
1544}
1545
/* Common for x86/x86-64.  */

/* Regset table shared by both register-info structures below;
   num_regsets and disabled_regsets are filled in at runtime.  */
static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* amd64 uses regsets only (no usrregs fallback).  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
/* PTRACE_PEEKUSER/POKEUSER register map for 32-bit i386.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

/* i386 supports both usrregs and regsets.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1575
3aee8918
PA
/* Return the register-access description matching the current
   inferior's word size (the_low_target.regs_info hook).  */

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
d0722149 1586
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  /* Probe the running inferior and cache the chosen description on
     the process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1595
219f2f23
PA
/* Tracepoints are always supported on x86/x86-64
   (the_low_target.supports_tracepoints hook).  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1601
fa593d66
PA
/* Write LEN bytes from BUF into the inferior at *TO, then advance *TO
   past them so successive calls lay down consecutive instructions.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1608
/* Decode OP, a string of space-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), into raw bytes stored at BUF.  Decoding stops
   at the first token that is not a hex number (or at end of string).
   Returns the number of bytes written.  BUF must be large enough; no
   bounds checking is done here.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* No digits consumed: done.  */
      if (end == op)
	break;

      *out++ = (unsigned char) byte;
      op = end;
    }

  return out - buf;
}
1628
1629#ifdef __x86_64__
1630
1631/* Build a jump pad that saves registers and calls a collection
1632 function. Writes a jump instruction to the jump pad to
1633 JJUMPAD_INSN. The caller is responsible to write it in at the
1634 tracepoint address. */
1635
1636static int
1637amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1638 CORE_ADDR collector,
1639 CORE_ADDR lockaddr,
1640 ULONGEST orig_size,
1641 CORE_ADDR *jump_entry,
405f8e94
SS
1642 CORE_ADDR *trampoline,
1643 ULONGEST *trampoline_size,
fa593d66
PA
1644 unsigned char *jjump_pad_insn,
1645 ULONGEST *jjump_pad_insn_size,
1646 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1647 CORE_ADDR *adjusted_insn_addr_end,
1648 char *err)
fa593d66
PA
1649{
1650 unsigned char buf[40];
1651 int i, offset;
f4647387
YQ
1652 int64_t loffset;
1653
fa593d66
PA
1654 CORE_ADDR buildaddr = *jump_entry;
1655
1656 /* Build the jump pad. */
1657
1658 /* First, do tracepoint data collection. Save registers. */
1659 i = 0;
1660 /* Need to ensure stack pointer saved first. */
1661 buf[i++] = 0x54; /* push %rsp */
1662 buf[i++] = 0x55; /* push %rbp */
1663 buf[i++] = 0x57; /* push %rdi */
1664 buf[i++] = 0x56; /* push %rsi */
1665 buf[i++] = 0x52; /* push %rdx */
1666 buf[i++] = 0x51; /* push %rcx */
1667 buf[i++] = 0x53; /* push %rbx */
1668 buf[i++] = 0x50; /* push %rax */
1669 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1670 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1671 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1672 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1673 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1674 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1675 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1676 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1677 buf[i++] = 0x9c; /* pushfq */
1678 buf[i++] = 0x48; /* movl <addr>,%rdi */
1679 buf[i++] = 0xbf;
1680 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1681 i += sizeof (unsigned long);
1682 buf[i++] = 0x57; /* push %rdi */
1683 append_insns (&buildaddr, i, buf);
1684
1685 /* Stack space for the collecting_t object. */
1686 i = 0;
1687 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1688 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1689 memcpy (buf + i, &tpoint, 8);
1690 i += 8;
1691 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1692 i += push_opcode (&buf[i],
1693 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1694 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1695 append_insns (&buildaddr, i, buf);
1696
1697 /* spin-lock. */
1698 i = 0;
1699 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1700 memcpy (&buf[i], (void *) &lockaddr, 8);
1701 i += 8;
1702 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1703 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1704 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1705 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1706 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1707 append_insns (&buildaddr, i, buf);
1708
1709 /* Set up the gdb_collect call. */
1710 /* At this point, (stack pointer + 0x18) is the base of our saved
1711 register block. */
1712
1713 i = 0;
1714 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1715 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1716
1717 /* tpoint address may be 64-bit wide. */
1718 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1719 memcpy (buf + i, &tpoint, 8);
1720 i += 8;
1721 append_insns (&buildaddr, i, buf);
1722
1723 /* The collector function being in the shared library, may be
1724 >31-bits away off the jump pad. */
1725 i = 0;
1726 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1727 memcpy (buf + i, &collector, 8);
1728 i += 8;
1729 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1730 append_insns (&buildaddr, i, buf);
1731
1732 /* Clear the spin-lock. */
1733 i = 0;
1734 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1735 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1736 memcpy (buf + i, &lockaddr, 8);
1737 i += 8;
1738 append_insns (&buildaddr, i, buf);
1739
1740 /* Remove stack that had been used for the collect_t object. */
1741 i = 0;
1742 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1743 append_insns (&buildaddr, i, buf);
1744
1745 /* Restore register state. */
1746 i = 0;
1747 buf[i++] = 0x48; /* add $0x8,%rsp */
1748 buf[i++] = 0x83;
1749 buf[i++] = 0xc4;
1750 buf[i++] = 0x08;
1751 buf[i++] = 0x9d; /* popfq */
1752 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1753 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1754 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1755 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1756 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1757 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1758 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1759 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1760 buf[i++] = 0x58; /* pop %rax */
1761 buf[i++] = 0x5b; /* pop %rbx */
1762 buf[i++] = 0x59; /* pop %rcx */
1763 buf[i++] = 0x5a; /* pop %rdx */
1764 buf[i++] = 0x5e; /* pop %rsi */
1765 buf[i++] = 0x5f; /* pop %rdi */
1766 buf[i++] = 0x5d; /* pop %rbp */
1767 buf[i++] = 0x5c; /* pop %rsp */
1768 append_insns (&buildaddr, i, buf);
1769
1770 /* Now, adjust the original instruction to execute in the jump
1771 pad. */
1772 *adjusted_insn_addr = buildaddr;
1773 relocate_instruction (&buildaddr, tpaddr);
1774 *adjusted_insn_addr_end = buildaddr;
1775
1776 /* Finally, write a jump back to the program. */
f4647387
YQ
1777
1778 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1779 if (loffset > INT_MAX || loffset < INT_MIN)
1780 {
1781 sprintf (err,
1782 "E.Jump back from jump pad too far from tracepoint "
1783 "(offset 0x%" PRIx64 " > int32).", loffset);
1784 return 1;
1785 }
1786
1787 offset = (int) loffset;
fa593d66
PA
1788 memcpy (buf, jump_insn, sizeof (jump_insn));
1789 memcpy (buf + 1, &offset, 4);
1790 append_insns (&buildaddr, sizeof (jump_insn), buf);
1791
1792 /* The jump pad is now built. Wire in a jump to our jump pad. This
1793 is always done last (by our caller actually), so that we can
1794 install fast tracepoints with threads running. This relies on
1795 the agent's atomic write support. */
f4647387
YQ
1796 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1797 if (loffset > INT_MAX || loffset < INT_MIN)
1798 {
1799 sprintf (err,
1800 "E.Jump pad too far from tracepoint "
1801 "(offset 0x%" PRIx64 " > int32).", loffset);
1802 return 1;
1803 }
1804
1805 offset = (int) loffset;
1806
fa593d66
PA
1807 memcpy (buf, jump_insn, sizeof (jump_insn));
1808 memcpy (buf + 1, &offset, 4);
1809 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1810 *jjump_pad_insn_size = sizeof (jump_insn);
1811
1812 /* Return the end address of our pad. */
1813 *jump_entry = buildaddr;
1814
1815 return 0;
1816}
1817
1818#endif /* __x86_64__ */
1819
1820/* Build a jump pad that saves registers and calls a collection
1821 function. Writes a jump instruction to the jump pad to
1822 JJUMPAD_INSN. The caller is responsible to write it in at the
1823 tracepoint address. */
1824
1825static int
1826i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1827 CORE_ADDR collector,
1828 CORE_ADDR lockaddr,
1829 ULONGEST orig_size,
1830 CORE_ADDR *jump_entry,
405f8e94
SS
1831 CORE_ADDR *trampoline,
1832 ULONGEST *trampoline_size,
fa593d66
PA
1833 unsigned char *jjump_pad_insn,
1834 ULONGEST *jjump_pad_insn_size,
1835 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1836 CORE_ADDR *adjusted_insn_addr_end,
1837 char *err)
fa593d66
PA
1838{
1839 unsigned char buf[0x100];
1840 int i, offset;
1841 CORE_ADDR buildaddr = *jump_entry;
1842
1843 /* Build the jump pad. */
1844
1845 /* First, do tracepoint data collection. Save registers. */
1846 i = 0;
1847 buf[i++] = 0x60; /* pushad */
1848 buf[i++] = 0x68; /* push tpaddr aka $pc */
1849 *((int *)(buf + i)) = (int) tpaddr;
1850 i += 4;
1851 buf[i++] = 0x9c; /* pushf */
1852 buf[i++] = 0x1e; /* push %ds */
1853 buf[i++] = 0x06; /* push %es */
1854 buf[i++] = 0x0f; /* push %fs */
1855 buf[i++] = 0xa0;
1856 buf[i++] = 0x0f; /* push %gs */
1857 buf[i++] = 0xa8;
1858 buf[i++] = 0x16; /* push %ss */
1859 buf[i++] = 0x0e; /* push %cs */
1860 append_insns (&buildaddr, i, buf);
1861
1862 /* Stack space for the collecting_t object. */
1863 i = 0;
1864 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1865
1866 /* Build the object. */
1867 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1868 memcpy (buf + i, &tpoint, 4);
1869 i += 4;
1870 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1871
1872 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1873 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1874 append_insns (&buildaddr, i, buf);
1875
1876 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1877 If we cared for it, this could be using xchg alternatively. */
1878
1879 i = 0;
1880 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1881 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1882 %esp,<lockaddr> */
1883 memcpy (&buf[i], (void *) &lockaddr, 4);
1884 i += 4;
1885 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1886 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1887 append_insns (&buildaddr, i, buf);
1888
1889
1890 /* Set up arguments to the gdb_collect call. */
1891 i = 0;
1892 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1893 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1894 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1895 append_insns (&buildaddr, i, buf);
1896
1897 i = 0;
1898 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1899 append_insns (&buildaddr, i, buf);
1900
1901 i = 0;
1902 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1903 memcpy (&buf[i], (void *) &tpoint, 4);
1904 i += 4;
1905 append_insns (&buildaddr, i, buf);
1906
1907 buf[0] = 0xe8; /* call <reladdr> */
1908 offset = collector - (buildaddr + sizeof (jump_insn));
1909 memcpy (buf + 1, &offset, 4);
1910 append_insns (&buildaddr, 5, buf);
1911 /* Clean up after the call. */
1912 buf[0] = 0x83; /* add $0x8,%esp */
1913 buf[1] = 0xc4;
1914 buf[2] = 0x08;
1915 append_insns (&buildaddr, 3, buf);
1916
1917
1918 /* Clear the spin-lock. This would need the LOCK prefix on older
1919 broken archs. */
1920 i = 0;
1921 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1922 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1923 memcpy (buf + i, &lockaddr, 4);
1924 i += 4;
1925 append_insns (&buildaddr, i, buf);
1926
1927
1928 /* Remove stack that had been used for the collect_t object. */
1929 i = 0;
1930 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1931 append_insns (&buildaddr, i, buf);
1932
1933 i = 0;
1934 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1935 buf[i++] = 0xc4;
1936 buf[i++] = 0x04;
1937 buf[i++] = 0x17; /* pop %ss */
1938 buf[i++] = 0x0f; /* pop %gs */
1939 buf[i++] = 0xa9;
1940 buf[i++] = 0x0f; /* pop %fs */
1941 buf[i++] = 0xa1;
1942 buf[i++] = 0x07; /* pop %es */
405f8e94 1943 buf[i++] = 0x1f; /* pop %ds */
fa593d66
PA
1944 buf[i++] = 0x9d; /* popf */
1945 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1946 buf[i++] = 0xc4;
1947 buf[i++] = 0x04;
1948 buf[i++] = 0x61; /* popad */
1949 append_insns (&buildaddr, i, buf);
1950
1951 /* Now, adjust the original instruction to execute in the jump
1952 pad. */
1953 *adjusted_insn_addr = buildaddr;
1954 relocate_instruction (&buildaddr, tpaddr);
1955 *adjusted_insn_addr_end = buildaddr;
1956
1957 /* Write the jump back to the program. */
1958 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1959 memcpy (buf, jump_insn, sizeof (jump_insn));
1960 memcpy (buf + 1, &offset, 4);
1961 append_insns (&buildaddr, sizeof (jump_insn), buf);
1962
1963 /* The jump pad is now built. Wire in a jump to our jump pad. This
1964 is always done last (by our caller actually), so that we can
1965 install fast tracepoints with threads running. This relies on
1966 the agent's atomic write support. */
405f8e94
SS
1967 if (orig_size == 4)
1968 {
1969 /* Create a trampoline. */
1970 *trampoline_size = sizeof (jump_insn);
1971 if (!claim_trampoline_space (*trampoline_size, trampoline))
1972 {
1973 /* No trampoline space available. */
1974 strcpy (err,
1975 "E.Cannot allocate trampoline space needed for fast "
1976 "tracepoints on 4-byte instructions.");
1977 return 1;
1978 }
1979
1980 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1981 memcpy (buf, jump_insn, sizeof (jump_insn));
1982 memcpy (buf + 1, &offset, 4);
1983 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1984
1985 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1986 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1987 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1988 memcpy (buf + 2, &offset, 2);
1989 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1990 *jjump_pad_insn_size = sizeof (small_jump_insn);
1991 }
1992 else
1993 {
1994 /* Else use a 32-bit relative jump instruction. */
1995 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1996 memcpy (buf, jump_insn, sizeof (jump_insn));
1997 memcpy (buf + 1, &offset, 4);
1998 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1999 *jjump_pad_insn_size = sizeof (jump_insn);
2000 }
fa593d66
PA
2001
2002 /* Return the end address of our pad. */
2003 *jump_entry = buildaddr;
2004
2005 return 0;
2006}
2007
/* Dispatch jump-pad installation to the variant matching the
   inferior's word size.  Arguments and return convention are those of
   the amd64/i386 workers above.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
2045
2046/* Return the minimum instruction length for fast tracepoints on x86/x86-64
2047 architectures. */
2048
2049static int
2050x86_get_min_fast_tracepoint_insn_len (void)
2051{
2052 static int warned_about_fast_tracepoints = 0;
2053
2054#ifdef __x86_64__
2055 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2056 used for fast tracepoints. */
3aee8918 2057 if (is_64bit_tdesc ())
405f8e94
SS
2058 return 5;
2059#endif
2060
58b4daa5 2061 if (agent_loaded_p ())
405f8e94
SS
2062 {
2063 char errbuf[IPA_BUFSIZ];
2064
2065 errbuf[0] = '\0';
2066
2067 /* On x86, if trampolines are available, then 4-byte jump instructions
2068 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2069 with a 4-byte offset are used instead. */
2070 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2071 return 4;
2072 else
2073 {
2074 /* GDB has no channel to explain to user why a shorter fast
2075 tracepoint is not possible, but at least make GDBserver
2076 mention that something has gone awry. */
2077 if (!warned_about_fast_tracepoints)
2078 {
2079 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2080 warned_about_fast_tracepoints = 1;
2081 }
2082 return 5;
2083 }
2084 }
2085 else
2086 {
2087 /* Indicate that the minimum length is currently unknown since the IPA
2088 has not loaded yet. */
2089 return 0;
2090 }
fa593d66
PA
2091}
2092
6a271cae
PA
/* Append LEN bytes of instructions from START at the current emit
   position (the global current_insn_ptr) and advance it.  Used by the
   EMIT_ASM machinery below.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
2105
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Assemble INSNS in place between the start_NAME/end_NAME labels and
   copy the resulting bytes into the jump pad with add_insns.  The
   leading jmp ensures gdbserver itself never executes the template.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
6a271cae
PA
2122
#ifdef __x86_64__

/* Like EMIT_ASM, but on a 64-bit build the template is assembled as
   32-bit (i386) code via .code32, then the assembler is switched back
   to .code64.  Used for templates injected into 32-bit inferiors.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a 32-bit build the native template macro already emits i386 code.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2143
2144#ifdef __x86_64__
2145
/* emit_ops::emit_prologue for amd64: set up a frame and spill the two
   incoming arguments (%rdi, %rsi) to the new frame.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
2156
2157
/* emit_ops::emit_epilogue for amd64: store the result (%rax) through
   the value pointer saved at -16(%rbp), return 0, and tear down the
   frame.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
2168
/* Add the next-to-top stack entry into the top-of-stack value cached
   in %rax, then drop the stack slot.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2176
/* Subtract top (%rax) from next-to-top in place, then pop the result
   back into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
2184
/* Multiplication is not implemented by this bytecode compiler; flag an
   error so the caller falls back to interpretation.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}
2190
/* Left shift: not implemented, flag an error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}
2196
/* Arithmetic right shift: not implemented, flag an error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}
2202
/* Logical right shift: not implemented, flag an error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2208
/* Sign-extend the low ARG bits of the top-of-stack value in %rax to
   the full 64 bits.  Only 8/16/32 are supported.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
2233
/* Logical NOT: replace top-of-stack (%rax) with 1 if it was zero,
   else 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}
2242
/* Bitwise AND of the top two stack entries; result stays in %rax.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2250
/* Bitwise OR of the top two stack entries; result stays in %rax.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2258
/* Bitwise XOR of the top two stack entries; result stays in %rax.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2266
/* Bitwise complement of the top-of-stack value (xor with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
2273
/* Compare the top two stack entries for equality; leave 1 in %rax if
   equal, 0 otherwise, and pop one slot.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2287
/* Signed "next-to-top < top" comparison; leave 1/0 in %rax and pop one
   slot.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2301
/* Unsigned "next-to-top < top" comparison; leave 1/0 in %rax and pop
   one slot.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2315
/* Dereference: replace the address in %rax with the SIZE-byte value it
   points at.  Sub-8-byte loads leave the upper bits of %rax alone for
   sizes 1 and 2, and zero them for size 4 (movl zero-extends).  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2339
2340static void
2341amd64_emit_if_goto (int *offset_p, int *size_p)
2342{
2343 EMIT_ASM (amd64_if_goto,
2344 "mov %rax,%rcx\n\t"
2345 "pop %rax\n\t"
2346 "cmp $0,%rcx\n\t"
2347 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2348 if (offset_p)
2349 *offset_p = 10;
2350 if (size_p)
2351 *size_p = 4;
2352}
2353
/* Unconditional jump with a zeroed 4-byte displacement right after the
   0xe9 opcode, to be patched by write_goto_address.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2364
/* Patch the 4-byte relative displacement at FROM so the branch whose
   displacement field lives there lands on TO.  Only SIZE == 4 is
   supported; anything else flags an emit error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  unsigned char buf[sizeof (int)];
  int rel;

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  /* Displacements are relative to the end of the field.  */
  rel = to - (from + size);
  memcpy (buf, &rel, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2380
/* Push constant NUM: emit "movabs $NUM,%rax" (REX.W + 0xb8 followed by
   the 8 immediate bytes) into the jump pad.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2395
/* Emit a call to the function at FN.  If FN is within a 32-bit
   displacement of the jump pad, use a 5-byte relative call; otherwise
   load the absolute address into a call-clobbered register and call
   through it.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.
	 NOTE(review): the bytes below actually encode "movabs $fn,%rdx"
	 / "callq *%rdx" (%r10 would need REX prefixes 0x49 / 0x41).
	 %rdx is also call-clobbered and holds no argument here, so this
	 is harmless, but comment and encoding disagree -- confirm.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      /* Fix: emit the call opcode itself.  Without this byte only the
	 bare displacement would be copied into the jump pad (compare
	 i386_emit_call, which emits 0xe8 before the offset).  */
      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2435
2436static void
2437amd64_emit_reg (int reg)
2438{
2439 unsigned char buf[16];
2440 int i;
2441 CORE_ADDR buildaddr;
2442
2443 /* Assume raw_regs is still in %rdi. */
2444 buildaddr = current_insn_ptr;
2445 i = 0;
2446 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 2447 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2448 i += 4;
2449 append_insns (&buildaddr, i, buf);
2450 current_insn_ptr = buildaddr;
2451 amd64_emit_call (get_raw_reg_func_addr ());
2452}
2453
/* Pop the stack into the top-of-stack cache register %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
2460
/* Flush the cached top-of-stack value (%rax) onto the real stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
2467
/* Zero all but the low ARG bits of the top-of-stack value in %rax.
   Only 8/16/32 are supported.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
2490
2491static void
2492amd64_emit_swap (void)
2493{
2494 EMIT_ASM (amd64_swap,
2495 "mov %rax,%rcx\n\t"
2496 "pop %rax\n\t"
2497 "push %rcx");
2498}
2499
2500static void
2501amd64_emit_stack_adjust (int n)
2502{
2503 unsigned char buf[16];
2504 int i;
2505 CORE_ADDR buildaddr = current_insn_ptr;
2506
2507 i = 0;
2508 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2509 buf[i++] = 0x8d;
2510 buf[i++] = 0x64;
2511 buf[i++] = 0x24;
2512 /* This only handles adjustments up to 16, but we don't expect any more. */
2513 buf[i++] = n * 8;
2514 append_insns (&buildaddr, i, buf);
2515 current_insn_ptr = buildaddr;
2516}
2517
/* FN's prototype is `LONGEST(*fn)(int)'.  */

/* Call FN with the single integer argument ARG1 (loaded into %edi);
   the result lands in %rax, the top-of-stack cache.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
2536
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

/* Call FN with ARG1 in %edi and the current top-of-stack value as the
   second argument (%rsi), preserving the top-of-stack across the
   call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2563
6b9801d4
SS
/* Compare-and-branch: jump if next-to-top == top, else fall through.
   Both operands are discarded on either path.  The patchable 4-byte
   displacement starts at byte 13 (4-byte cmp + 2-byte jne + 5-byte lea
   + 1-byte pop + 0xe9 opcode).  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2583
/* Compare-and-branch: jump if next-to-top != top.  Displacement at
   byte 13, as in amd64_emit_eq_goto.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2603
/* Compare-and-branch: jump if next-to-top < top (signed).
   Displacement at byte 13.  */

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2623
/* Compare-and-branch: jump if next-to-top <= top (signed).
   Displacement at byte 13.  */

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2643
/* Compare-and-branch: jump if next-to-top > top (signed).
   Displacement at byte 13.  */

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2663
/* Compare-and-branch: jump if next-to-top >= top (signed).
   Displacement at byte 13.  */

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2684
6a271cae
PA
/* The emit_ops vector for 64-bit inferiors.  Field order follows
   struct emit_ops (positional initializer).  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2725
2726#endif /* __x86_64__ */
2727
/* emit_ops::emit_prologue for i386: set up a frame and preserve %ebx
   (used as the high half of the 64-bit top-of-stack cache).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2738
/* emit_ops::emit_epilogue for i386: store the 64-bit result
   (%ebx:%eax) through the value pointer at 12(%ebp), return 0, restore
   %ebx, and tear down the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2751
/* 64-bit add: top of stack is cached in the %ebx(hi):%eax(lo) pair;
   add the next-to-top pair with carry, then drop its two slots.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2760
/* 64-bit subtract with borrow: next-to-top minus top, result popped
   back into %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2770
/* Multiplication is not implemented; flag an error.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2776
/* Left shift: not implemented, flag an error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2782
/* Arithmetic right shift: not implemented, flag an error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2788
/* Logical right shift: not implemented, flag an error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2794
/* Sign-extend the low ARG bits of the %ebx:%eax top-of-stack pair to
   the full 64 bits; the high word is filled from the sign bit.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2822
/* Logical NOT of the 64-bit top: 1 if both halves were zero, else 0.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2833
2834static void
2835i386_emit_bit_and (void)
2836{
2837 EMIT_ASM32 (i386_and,
2838 "and (%esp),%eax\n\t"
2839 "and 0x4(%esp),%ebx\n\t"
2840 "lea 0x8(%esp),%esp");
2841}
2842
/* 64-bit bitwise OR of the top two entries; drop one pair of slots.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2851
/* 64-bit bitwise XOR of the top two entries; drop one pair of slots.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2860
/* Complement both halves of the 64-bit top-of-stack pair.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2868
/* 64-bit equality: compare high halves first, then low; leave 1/0 in
   %ebx:%eax and drop one pair of slots.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2886
/* Signed 64-bit "next-to-top < top": decide on the high halves when
   they differ, otherwise on the low halves; leave 1/0.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2905
2906static void
2907i386_emit_less_unsigned (void)
2908{
2909 EMIT_ASM32 (i386_less_unsigned,
2910 "cmpl %ebx,4(%esp)\n\t"
2911 "jb .Li386_less_unsigned_true\n\t"
2912 "jne .Li386_less_unsigned_false\n\t"
2913 "cmpl %eax,(%esp)\n\t"
2914 "jb .Li386_less_unsigned_true\n\t"
2915 ".Li386_less_unsigned_false:\n\t"
2916 "xor %eax,%eax\n\t"
2917 "jmp .Li386_less_unsigned_end\n\t"
2918 ".Li386_less_unsigned_true:\n\t"
2919 "mov $1,%eax\n\t"
2920 ".Li386_less_unsigned_end:\n\t"
2921 "xor %ebx,%ebx\n\t"
2922 "lea 0x8(%esp),%esp");
2923}
2924
2925static void
2926i386_emit_ref (int size)
2927{
2928 switch (size)
2929 {
2930 case 1:
2931 EMIT_ASM32 (i386_ref1,
2932 "movb (%eax),%al");
2933 break;
2934 case 2:
2935 EMIT_ASM32 (i386_ref2,
2936 "movw (%eax),%ax");
2937 break;
2938 case 4:
2939 EMIT_ASM32 (i386_ref4,
2940 "movl (%eax),%eax");
2941 break;
2942 case 8:
2943 EMIT_ASM32 (i386_ref8,
2944 "movl 4(%eax),%ebx\n\t"
2945 "movl (%eax),%eax");
2946 break;
2947 }
2948}
2949
2950static void
2951i386_emit_if_goto (int *offset_p, int *size_p)
2952{
2953 EMIT_ASM32 (i386_if_goto,
2954 "mov %eax,%ecx\n\t"
2955 "or %ebx,%ecx\n\t"
2956 "pop %eax\n\t"
2957 "pop %ebx\n\t"
2958 "cmpl $0,%ecx\n\t"
2959 /* Don't trust the assembler to choose the right jump */
2960 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2961
2962 if (offset_p)
2963 *offset_p = 11; /* be sure that this matches the sequence above */
2964 if (size_p)
2965 *size_p = 4;
2966}
2967
2968static void
2969i386_emit_goto (int *offset_p, int *size_p)
2970{
2971 EMIT_ASM32 (i386_goto,
2972 /* Don't trust the assembler to choose the right jump */
2973 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2974 if (offset_p)
2975 *offset_p = 1;
2976 if (size_p)
2977 *size_p = 4;
2978}
2979
2980static void
2981i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2982{
2983 int diff = (to - (from + size));
2984 unsigned char buf[sizeof (int)];
2985
2986 /* We're only doing 4-byte sizes at the moment. */
2987 if (size != 4)
2988 {
2989 emit_error = 1;
2990 return;
2991 }
2992
2993 memcpy (buf, &diff, sizeof (int));
2994 write_inferior_memory (from, buf, sizeof (int));
2995}
2996
2997static void
4e29fb54 2998i386_emit_const (LONGEST num)
6a271cae
PA
2999{
3000 unsigned char buf[16];
b00ad6ff 3001 int i, hi, lo;
6a271cae
PA
3002 CORE_ADDR buildaddr = current_insn_ptr;
3003
3004 i = 0;
3005 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
3006 lo = num & 0xffffffff;
3007 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
3008 i += 4;
3009 hi = ((num >> 32) & 0xffffffff);
3010 if (hi)
3011 {
3012 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 3013 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
3014 i += 4;
3015 }
3016 else
3017 {
3018 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3019 }
3020 append_insns (&buildaddr, i, buf);
3021 current_insn_ptr = buildaddr;
3022}
3023
3024static void
3025i386_emit_call (CORE_ADDR fn)
3026{
3027 unsigned char buf[16];
3028 int i, offset;
3029 CORE_ADDR buildaddr;
3030
3031 buildaddr = current_insn_ptr;
3032 i = 0;
3033 buf[i++] = 0xe8; /* call <reladdr> */
3034 offset = ((int) fn) - (buildaddr + 5);
3035 memcpy (buf + 1, &offset, 4);
3036 append_insns (&buildaddr, 5, buf);
3037 current_insn_ptr = buildaddr;
3038}
3039
3040static void
3041i386_emit_reg (int reg)
3042{
3043 unsigned char buf[16];
3044 int i;
3045 CORE_ADDR buildaddr;
3046
3047 EMIT_ASM32 (i386_reg_a,
3048 "sub $0x8,%esp");
3049 buildaddr = current_insn_ptr;
3050 i = 0;
3051 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff 3052 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
3053 i += 4;
3054 append_insns (&buildaddr, i, buf);
3055 current_insn_ptr = buildaddr;
3056 EMIT_ASM32 (i386_reg_b,
3057 "mov %eax,4(%esp)\n\t"
3058 "mov 8(%ebp),%eax\n\t"
3059 "mov %eax,(%esp)");
3060 i386_emit_call (get_raw_reg_func_addr ());
3061 EMIT_ASM32 (i386_reg_c,
3062 "xor %ebx,%ebx\n\t"
3063 "lea 0x8(%esp),%esp");
3064}
3065
3066static void
3067i386_emit_pop (void)
3068{
3069 EMIT_ASM32 (i386_pop,
3070 "pop %eax\n\t"
3071 "pop %ebx");
3072}
3073
3074static void
3075i386_emit_stack_flush (void)
3076{
3077 EMIT_ASM32 (i386_stack_flush,
3078 "push %ebx\n\t"
3079 "push %eax");
3080}
3081
3082static void
3083i386_emit_zero_ext (int arg)
3084{
3085 switch (arg)
3086 {
3087 case 8:
3088 EMIT_ASM32 (i386_zero_ext_8,
3089 "and $0xff,%eax\n\t"
3090 "xor %ebx,%ebx");
3091 break;
3092 case 16:
3093 EMIT_ASM32 (i386_zero_ext_16,
3094 "and $0xffff,%eax\n\t"
3095 "xor %ebx,%ebx");
3096 break;
3097 case 32:
3098 EMIT_ASM32 (i386_zero_ext_32,
3099 "xor %ebx,%ebx");
3100 break;
3101 default:
3102 emit_error = 1;
3103 }
3104}
3105
3106static void
3107i386_emit_swap (void)
3108{
3109 EMIT_ASM32 (i386_swap,
3110 "mov %eax,%ecx\n\t"
3111 "mov %ebx,%edx\n\t"
3112 "pop %eax\n\t"
3113 "pop %ebx\n\t"
3114 "push %edx\n\t"
3115 "push %ecx");
3116}
3117
3118static void
3119i386_emit_stack_adjust (int n)
3120{
3121 unsigned char buf[16];
3122 int i;
3123 CORE_ADDR buildaddr = current_insn_ptr;
3124
3125 i = 0;
3126 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3127 buf[i++] = 0x64;
3128 buf[i++] = 0x24;
3129 buf[i++] = n * 8;
3130 append_insns (&buildaddr, i, buf);
3131 current_insn_ptr = buildaddr;
3132}
3133
/* FN's prototype is `LONGEST(*fn)(int)'.  */

/* Call FN with the single integer argument ARG1 passed on the stack;
   the 64-bit result is returned in %edx:%eax and moved into the
   %ebx:%eax cache.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3161
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

/* Call FN with ARG1 and the current 64-bit top-of-stack value as
   arguments (all passed on the stack), preserving the top across the
   call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
3197
6b9801d4
SS
3198
/* 64-bit compare-and-branch: jump if next-to-top == top, else fall
   through; both operands are discarded on either path.  The patchable
   4-byte displacement starts at byte 18 of the sequence.  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3223
/* 64-bit compare-and-branch: jump if next-to-top != top.
   Displacement at byte 18.  */

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3249
3250void
3251i386_emit_lt_goto (int *offset_p, int *size_p)
3252{
3253 EMIT_ASM32 (lt,
3254 "cmpl %ebx,4(%esp)\n\t"
3255 "jl .Llt_jump\n\t"
3256 "jne .Llt_fallthru\n\t"
3257 "cmpl %eax,(%esp)\n\t"
3258 "jnl .Llt_fallthru\n\t"
3259 ".Llt_jump:\n\t"
3260 "lea 0x8(%esp),%esp\n\t"
3261 "pop %eax\n\t"
3262 "pop %ebx\n\t"
3263 /* jmp, but don't trust the assembler to choose the right jump */
3264 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3265 ".Llt_fallthru:\n\t"
3266 "lea 0x8(%esp),%esp\n\t"
3267 "pop %eax\n\t"
3268 "pop %ebx");
3269
3270 if (offset_p)
3271 *offset_p = 20;
3272 if (size_p)
3273 *size_p = 4;
3274}
3275
3276void
3277i386_emit_le_goto (int *offset_p, int *size_p)
3278{
3279 EMIT_ASM32 (le,
3280 "cmpl %ebx,4(%esp)\n\t"
3281 "jle .Lle_jump\n\t"
3282 "jne .Lle_fallthru\n\t"
3283 "cmpl %eax,(%esp)\n\t"
3284 "jnle .Lle_fallthru\n\t"
3285 ".Lle_jump:\n\t"
3286 "lea 0x8(%esp),%esp\n\t"
3287 "pop %eax\n\t"
3288 "pop %ebx\n\t"
3289 /* jmp, but don't trust the assembler to choose the right jump */
3290 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3291 ".Lle_fallthru:\n\t"
3292 "lea 0x8(%esp),%esp\n\t"
3293 "pop %eax\n\t"
3294 "pop %ebx");
3295
3296 if (offset_p)
3297 *offset_p = 20;
3298 if (size_p)
3299 *size_p = 4;
3300}
3301
3302void
3303i386_emit_gt_goto (int *offset_p, int *size_p)
3304{
3305 EMIT_ASM32 (gt,
3306 "cmpl %ebx,4(%esp)\n\t"
3307 "jg .Lgt_jump\n\t"
3308 "jne .Lgt_fallthru\n\t"
3309 "cmpl %eax,(%esp)\n\t"
3310 "jng .Lgt_fallthru\n\t"
3311 ".Lgt_jump:\n\t"
3312 "lea 0x8(%esp),%esp\n\t"
3313 "pop %eax\n\t"
3314 "pop %ebx\n\t"
3315 /* jmp, but don't trust the assembler to choose the right jump */
3316 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3317 ".Lgt_fallthru:\n\t"
3318 "lea 0x8(%esp),%esp\n\t"
3319 "pop %eax\n\t"
3320 "pop %ebx");
3321
3322 if (offset_p)
3323 *offset_p = 20;
3324 if (size_p)
3325 *size_p = 4;
3326}
3327
3328void
3329i386_emit_ge_goto (int *offset_p, int *size_p)
3330{
3331 EMIT_ASM32 (ge,
3332 "cmpl %ebx,4(%esp)\n\t"
3333 "jge .Lge_jump\n\t"
3334 "jne .Lge_fallthru\n\t"
3335 "cmpl %eax,(%esp)\n\t"
3336 "jnge .Lge_fallthru\n\t"
3337 ".Lge_jump:\n\t"
3338 "lea 0x8(%esp),%esp\n\t"
3339 "pop %eax\n\t"
3340 "pop %ebx\n\t"
3341 /* jmp, but don't trust the assembler to choose the right jump */
3342 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3343 ".Lge_fallthru:\n\t"
3344 "lea 0x8(%esp),%esp\n\t"
3345 "pop %eax\n\t"
3346 "pop %ebx");
3347
3348 if (offset_p)
3349 *offset_p = 20;
3350 if (size_p)
3351 *size_p = 4;
3352}
3353
6a271cae
PA
/* The emit_ops vector for 32-bit inferiors.  Field order follows
   struct emit_ops (positional initializer).  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3394
3395
/* Select the emit_ops vector matching the current inferior's target
   description: amd64 for 64-bit, i386 otherwise.  */

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3406
c2d6af84
PA
/* The linux_target_ops supports_range_stepping hook: range stepping is
   always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3412
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

/* The x86 linux_target_ops vector; fields are positional, matching
   struct linux_target_ops in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL, /* breakpoint_reinsert_addr */
  1, /* decr_pc_after_break */
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
3aee8918
PA
3453
/* Register all x86 Linux target descriptions at startup, and build the
   no-XML fallback descriptions used when the debugger cannot fetch
   XML target descriptions.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();

  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.619457 seconds and 4 git commands to generate.