Add --enable-build-with-cxx configure switch
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
32d0add0 3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265
GB
26#include "x86-low.h"
27#include "x86-xstate.h"
d0722149
DE
28
29#include "gdb_proc_service.h"
b5737fa9
PA
30/* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32#ifndef ELFMAG0
33#include "elf/common.h"
34#endif
35
58b4daa5 36#include "agent.h"
3aee8918 37#include "tdesc.h"
c144c7a0 38#include "tracepoint.h"
f699aaba 39#include "ax.h"
d0722149 40
3aee8918 41#ifdef __x86_64__
90884b2b
L
42/* Defined in auto-generated file amd64-linux.c. */
43void init_registers_amd64_linux (void);
3aee8918
PA
44extern const struct target_desc *tdesc_amd64_linux;
45
1570b33e
L
46/* Defined in auto-generated file amd64-avx-linux.c. */
47void init_registers_amd64_avx_linux (void);
3aee8918
PA
48extern const struct target_desc *tdesc_amd64_avx_linux;
49
01f9f808
MS
50/* Defined in auto-generated file amd64-avx512-linux.c. */
51void init_registers_amd64_avx512_linux (void);
52extern const struct target_desc *tdesc_amd64_avx512_linux;
53
a196ebeb
WT
54/* Defined in auto-generated file amd64-mpx-linux.c. */
55void init_registers_amd64_mpx_linux (void);
56extern const struct target_desc *tdesc_amd64_mpx_linux;
57
4d47af5c
L
58/* Defined in auto-generated file x32-linux.c. */
59void init_registers_x32_linux (void);
3aee8918
PA
60extern const struct target_desc *tdesc_x32_linux;
61
4d47af5c
L
62/* Defined in auto-generated file x32-avx-linux.c. */
63void init_registers_x32_avx_linux (void);
3aee8918 64extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 65
01f9f808
MS
66/* Defined in auto-generated file x32-avx512-linux.c. */
67void init_registers_x32_avx512_linux (void);
68extern const struct target_desc *tdesc_x32_avx512_linux;
69
3aee8918
PA
70#endif
71
72/* Defined in auto-generated file i386-linux.c. */
73void init_registers_i386_linux (void);
74extern const struct target_desc *tdesc_i386_linux;
75
76/* Defined in auto-generated file i386-mmx-linux.c. */
77void init_registers_i386_mmx_linux (void);
78extern const struct target_desc *tdesc_i386_mmx_linux;
79
80/* Defined in auto-generated file i386-avx-linux.c. */
81void init_registers_i386_avx_linux (void);
82extern const struct target_desc *tdesc_i386_avx_linux;
83
01f9f808
MS
84/* Defined in auto-generated file i386-avx512-linux.c. */
85void init_registers_i386_avx512_linux (void);
86extern const struct target_desc *tdesc_i386_avx512_linux;
87
a196ebeb
WT
88/* Defined in auto-generated file i386-mpx-linux.c. */
89void init_registers_i386_mpx_linux (void);
90extern const struct target_desc *tdesc_i386_mpx_linux;
91
3aee8918
PA
92#ifdef __x86_64__
93static struct target_desc *tdesc_amd64_linux_no_xml;
94#endif
95static struct target_desc *tdesc_i386_linux_no_xml;
96
1570b33e 97
/* Byte sequences for the jumps the in-process agent patches in:
   a 5-byte 32-bit-displacement jump, and a 4-byte 16-bit-displacement
   jump (0x66 operand-size prefix + 0xe9).  Displacements are filled
   in at patch time.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
114
115#include <sys/reg.h>
116#include <sys/procfs.h>
117#include <sys/ptrace.h>
1570b33e
L
118#include <sys/uio.h>
119
120#ifndef PTRACE_GETREGSET
121#define PTRACE_GETREGSET 0x4204
122#endif
123
124#ifndef PTRACE_SETREGSET
125#define PTRACE_SETREGSET 0x4205
126#endif
127
d0722149
DE
128
129#ifndef PTRACE_GET_THREAD_AREA
130#define PTRACE_GET_THREAD_AREA 25
131#endif
132
133/* This definition comes from prctl.h, but some kernels may not have it. */
134#ifndef PTRACE_ARCH_PRCTL
135#define PTRACE_ARCH_PRCTL 30
136#endif
137
138/* The following definitions come from prctl.h, but may be absent
139 for certain configurations. */
140#ifndef ARCH_GET_FS
141#define ARCH_SET_GS 0x1001
142#define ARCH_SET_FS 0x1002
143#define ARCH_GET_FS 0x1003
144#define ARCH_GET_GS 0x1004
145#endif
146
aa5ca48f
DE
147/* Per-process arch-specific data we want to keep. */
148
149struct arch_process_info
150{
df7e5265 151 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
152};
153
154/* Per-thread arch-specific data we want to keep. */
155
156struct arch_lwp_info
157{
158 /* Non-zero if our copy differs from what's recorded in the thread. */
159 int debug_registers_changed;
160};
161
d0722149
DE
162#ifdef __x86_64__
163
164/* Mapping between the general-purpose registers in `struct user'
165 format and GDB's register array layout.
166 Note that the transfer layout uses 64-bit regs. */
167static /*const*/ int i386_regmap[] =
168{
169 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
170 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
171 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
172 DS * 8, ES * 8, FS * 8, GS * 8
173};
174
175#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
176
177/* So code below doesn't have to care, i386 or amd64. */
178#define ORIG_EAX ORIG_RAX
bc9540e8 179#define REGSIZE 8
d0722149
DE
180
181static const int x86_64_regmap[] =
182{
183 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
184 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
185 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
186 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
187 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
188 DS * 8, ES * 8, FS * 8, GS * 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
192 -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
194 ORIG_RAX * 8,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
d0722149
DE
206};
207
208#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 209#define X86_64_USER_REGS (GS + 1)
d0722149
DE
210
211#else /* ! __x86_64__ */
212
213/* Mapping between the general-purpose registers in `struct user'
214 format and GDB's register array layout. */
215static /*const*/ int i386_regmap[] =
216{
217 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
218 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
219 EIP * 4, EFL * 4, CS * 4, SS * 4,
220 DS * 4, ES * 4, FS * 4, GS * 4
221};
222
223#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
224
bc9540e8
PA
225#define REGSIZE 4
226
d0722149 227#endif
3aee8918
PA
228
229#ifdef __x86_64__
230
231/* Returns true if the current inferior belongs to a x86-64 process,
232 per the tdesc. */
233
234static int
235is_64bit_tdesc (void)
236{
0bfdf32f 237 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
238
239 return register_size (regcache->tdesc, 0) == 8;
240}
241
242#endif
243
d0722149
DE
244\f
245/* Called by libthread_db. */
246
247ps_err_e
248ps_get_thread_area (const struct ps_prochandle *ph,
249 lwpid_t lwpid, int idx, void **base)
250{
251#ifdef __x86_64__
3aee8918 252 int use_64bit = is_64bit_tdesc ();
d0722149
DE
253
254 if (use_64bit)
255 {
256 switch (idx)
257 {
258 case FS:
259 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
260 return PS_OK;
261 break;
262 case GS:
263 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
264 return PS_OK;
265 break;
266 default:
267 return PS_BADADDR;
268 }
269 return PS_ERR;
270 }
271#endif
272
273 {
274 unsigned int desc[4];
275
276 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
277 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
278 return PS_ERR;
279
d1ec4ce7
DE
280 /* Ensure we properly extend the value to 64-bits for x86_64. */
281 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
282 return PS_OK;
283 }
284}
fa593d66
PA
285
286/* Get the thread area address. This is used to recognize which
287 thread is which when tracing with the in-process agent library. We
288 don't read anything from the address, and treat it as opaque; it's
289 the address itself that we assume is unique per-thread. */
290
291static int
292x86_get_thread_area (int lwpid, CORE_ADDR *addr)
293{
294#ifdef __x86_64__
3aee8918 295 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
296
297 if (use_64bit)
298 {
299 void *base;
300 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
301 {
302 *addr = (CORE_ADDR) (uintptr_t) base;
303 return 0;
304 }
305
306 return -1;
307 }
308#endif
309
310 {
311 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
d86d4aaf
DE
312 struct thread_info *thr = get_lwp_thread (lwp);
313 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
314 unsigned int desc[4];
315 ULONGEST gs = 0;
316 const int reg_thread_area = 3; /* bits to scale down register value. */
317 int idx;
318
319 collect_register_by_name (regcache, "gs", &gs);
320
321 idx = gs >> reg_thread_area;
322
323 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 324 lwpid_of (thr),
493e2a69 325 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
326 return -1;
327
328 *addr = desc[1];
329 return 0;
330 }
331}
332
333
d0722149
DE
334\f
335static int
3aee8918 336x86_cannot_store_register (int regno)
d0722149 337{
3aee8918
PA
338#ifdef __x86_64__
339 if (is_64bit_tdesc ())
340 return 0;
341#endif
342
d0722149
DE
343 return regno >= I386_NUM_REGS;
344}
345
346static int
3aee8918 347x86_cannot_fetch_register (int regno)
d0722149 348{
3aee8918
PA
349#ifdef __x86_64__
350 if (is_64bit_tdesc ())
351 return 0;
352#endif
353
d0722149
DE
354 return regno >= I386_NUM_REGS;
355}
356
357static void
442ea881 358x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
359{
360 int i;
361
362#ifdef __x86_64__
3aee8918 363 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
364 {
365 for (i = 0; i < X86_64_NUM_REGS; i++)
366 if (x86_64_regmap[i] != -1)
442ea881 367 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
368 return;
369 }
9e0aa64f
JK
370
371 /* 32-bit inferior registers need to be zero-extended.
372 Callers would read uninitialized memory otherwise. */
373 memset (buf, 0x00, X86_64_USER_REGS * 8);
d0722149
DE
374#endif
375
376 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 377 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 378
442ea881 379 collect_register_by_name (regcache, "orig_eax",
bc9540e8 380 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
381}
382
383static void
442ea881 384x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
385{
386 int i;
387
388#ifdef __x86_64__
3aee8918 389 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
390 {
391 for (i = 0; i < X86_64_NUM_REGS; i++)
392 if (x86_64_regmap[i] != -1)
442ea881 393 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
394 return;
395 }
396#endif
397
398 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 399 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 400
442ea881 401 supply_register_by_name (regcache, "orig_eax",
bc9540e8 402 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
403}
404
/* Fill the FP-regset buffer BUF from REGCACHE (fxsave layout on
   64-bit, fsave layout on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
414
/* Write the FP-regset buffer BUF back into REGCACHE (fxsave layout on
   64-bit, fsave layout on 32-bit).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
424
#ifndef __x86_64__

/* Fill the FPX (fxsave) regset buffer BUF from REGCACHE.  Only needed
   on 32-bit, where fxsave is a separate regset from fsave.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Write the FPX (fxsave) regset buffer BUF back into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
440
1570b33e
L
/* Fill the XSAVE regset buffer BUF from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
446
/* Write the XSAVE regset buffer BUF back into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
452
d0722149
DE
453/* ??? The non-biarch i386 case stores all the i387 regs twice.
454 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
455 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
456 doesn't work. IWBN to avoid the duplication in the case where it
457 does work. Maybe the arch_setup routine could check whether it works
3aee8918 458 and update the supported regsets accordingly. */
d0722149 459
3aee8918 460static struct regset_info x86_regsets[] =
d0722149
DE
461{
462#ifdef HAVE_PTRACE_GETREGS
1570b33e 463 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
464 GENERAL_REGS,
465 x86_fill_gregset, x86_store_gregset },
1570b33e
L
466 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
467 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
468# ifndef __x86_64__
469# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 470 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
471 EXTENDED_REGS,
472 x86_fill_fpxregset, x86_store_fpxregset },
473# endif
474# endif
1570b33e 475 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
476 FP_REGS,
477 x86_fill_fpregset, x86_store_fpregset },
478#endif /* HAVE_PTRACE_GETREGS */
1570b33e 479 { 0, 0, 0, -1, -1, NULL, NULL }
d0722149
DE
480};
481
482static CORE_ADDR
442ea881 483x86_get_pc (struct regcache *regcache)
d0722149 484{
3aee8918 485 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
486
487 if (use_64bit)
488 {
489 unsigned long pc;
442ea881 490 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
491 return (CORE_ADDR) pc;
492 }
493 else
494 {
495 unsigned int pc;
442ea881 496 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
497 return (CORE_ADDR) pc;
498 }
499}
500
501static void
442ea881 502x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 503{
3aee8918 504 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
505
506 if (use_64bit)
507 {
508 unsigned long newpc = pc;
442ea881 509 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
510 }
511 else
512 {
513 unsigned int newpc = pc;
442ea881 514 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
515 }
516}
517\f
518static const unsigned char x86_breakpoint[] = { 0xCC };
519#define x86_breakpoint_len 1
520
521static int
522x86_breakpoint_at (CORE_ADDR pc)
523{
524 unsigned char c;
525
fc7238bb 526 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
527 if (c == 0xCC)
528 return 1;
529
530 return 0;
531}
532\f
aa5ca48f
DE
533/* Support for debug registers. */
534
535static unsigned long
536x86_linux_dr_get (ptid_t ptid, int regnum)
537{
538 int tid;
539 unsigned long value;
540
541 tid = ptid_get_lwp (ptid);
542
543 errno = 0;
544 value = ptrace (PTRACE_PEEKUSER, tid,
545 offsetof (struct user, u_debugreg[regnum]), 0);
546 if (errno != 0)
547 error ("Couldn't read debug register");
548
549 return value;
550}
551
552static void
553x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
554{
555 int tid;
556
557 tid = ptid_get_lwp (ptid);
558
559 errno = 0;
560 ptrace (PTRACE_POKEUSER, tid,
561 offsetof (struct user, u_debugreg[regnum]), value);
562 if (errno != 0)
563 error ("Couldn't write debug register");
564}
565
964e4306
PA
566static int
567update_debug_registers_callback (struct inferior_list_entry *entry,
568 void *pid_p)
569{
d86d4aaf
DE
570 struct thread_info *thr = (struct thread_info *) entry;
571 struct lwp_info *lwp = get_thread_lwp (thr);
964e4306
PA
572 int pid = *(int *) pid_p;
573
574 /* Only update the threads of this process. */
d86d4aaf 575 if (pid_of (thr) == pid)
964e4306
PA
576 {
577 /* The actual update is done later just before resuming the lwp,
578 we just mark that the registers need updating. */
579 lwp->arch_private->debug_registers_changed = 1;
580
581 /* If the lwp isn't stopped, force it to momentarily pause, so
582 we can update its debug registers. */
583 if (!lwp->stopped)
584 linux_stop_lwp (lwp);
585 }
586
587 return 0;
588}
589
aa5ca48f
DE
590/* Update the inferior's debug register REGNUM from STATE. */
591
42995dbd 592static void
df7e5265 593x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
aa5ca48f 594{
964e4306 595 /* Only update the threads of this process. */
0bfdf32f 596 int pid = pid_of (current_thread);
aa5ca48f 597
f7160e97 598 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
aa5ca48f 599
d86d4aaf 600 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 601}
aa5ca48f 602
964e4306 603/* Return the inferior's debug register REGNUM. */
aa5ca48f 604
42995dbd 605static CORE_ADDR
df7e5265 606x86_dr_low_get_addr (int regnum)
964e4306 607{
0bfdf32f 608 ptid_t ptid = ptid_of (current_thread);
964e4306 609
0a5b1e09 610 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
611
612 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
613}
614
615/* Update the inferior's DR7 debug control register from STATE. */
616
42995dbd 617static void
df7e5265 618x86_dr_low_set_control (unsigned long control)
aa5ca48f 619{
964e4306 620 /* Only update the threads of this process. */
0bfdf32f 621 int pid = pid_of (current_thread);
aa5ca48f 622
d86d4aaf 623 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 624}
aa5ca48f 625
964e4306
PA
626/* Return the inferior's DR7 debug control register. */
627
42995dbd 628static unsigned long
df7e5265 629x86_dr_low_get_control (void)
964e4306 630{
0bfdf32f 631 ptid_t ptid = ptid_of (current_thread);
964e4306
PA
632
633 return x86_linux_dr_get (ptid, DR_CONTROL);
aa5ca48f
DE
634}
635
636/* Get the value of the DR6 debug status register from the inferior
637 and record it in STATE. */
638
42995dbd 639static unsigned long
df7e5265 640x86_dr_low_get_status (void)
aa5ca48f 641{
0bfdf32f 642 ptid_t ptid = ptid_of (current_thread);
aa5ca48f 643
964e4306 644 return x86_linux_dr_get (ptid, DR_STATUS);
aa5ca48f 645}
42995dbd
GB
646
647/* Low-level function vector. */
df7e5265 648struct x86_dr_low_type x86_dr_low =
42995dbd 649 {
df7e5265
GB
650 x86_dr_low_set_control,
651 x86_dr_low_set_addr,
652 x86_dr_low_get_addr,
653 x86_dr_low_get_status,
654 x86_dr_low_get_control,
42995dbd
GB
655 sizeof (void *),
656 };
aa5ca48f 657\f
90d74c30 658/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
659
660static int
802e8e6d
PA
661x86_supports_z_point_type (char z_type)
662{
663 switch (z_type)
664 {
665 case Z_PACKET_SW_BP:
666 case Z_PACKET_HW_BP:
667 case Z_PACKET_WRITE_WP:
668 case Z_PACKET_ACCESS_WP:
669 return 1;
670 default:
671 return 0;
672 }
673}
674
675static int
676x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
677 int size, struct raw_breakpoint *bp)
aa5ca48f
DE
678{
679 struct process_info *proc = current_process ();
802e8e6d 680
aa5ca48f
DE
681 switch (type)
682 {
802e8e6d
PA
683 case raw_bkpt_type_sw:
684 return insert_memory_breakpoint (bp);
685
686 case raw_bkpt_type_hw:
687 case raw_bkpt_type_write_wp:
688 case raw_bkpt_type_access_wp:
a4165e94 689 {
802e8e6d
PA
690 enum target_hw_bp_type hw_type
691 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 692 struct x86_debug_reg_state *state
a4165e94
PA
693 = &proc->private->arch_private->debug_reg_state;
694
df7e5265 695 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 696 }
961bd387 697
aa5ca48f
DE
698 default:
699 /* Unsupported. */
700 return 1;
701 }
702}
703
704static int
802e8e6d
PA
705x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
706 int size, struct raw_breakpoint *bp)
aa5ca48f
DE
707{
708 struct process_info *proc = current_process ();
802e8e6d 709
aa5ca48f
DE
710 switch (type)
711 {
802e8e6d
PA
712 case raw_bkpt_type_sw:
713 return remove_memory_breakpoint (bp);
714
715 case raw_bkpt_type_hw:
716 case raw_bkpt_type_write_wp:
717 case raw_bkpt_type_access_wp:
a4165e94 718 {
802e8e6d
PA
719 enum target_hw_bp_type hw_type
720 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 721 struct x86_debug_reg_state *state
a4165e94
PA
722 = &proc->private->arch_private->debug_reg_state;
723
df7e5265 724 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 725 }
aa5ca48f
DE
726 default:
727 /* Unsupported. */
728 return 1;
729 }
730}
731
732static int
733x86_stopped_by_watchpoint (void)
734{
735 struct process_info *proc = current_process ();
df7e5265 736 return x86_dr_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
aa5ca48f
DE
737}
738
739static CORE_ADDR
740x86_stopped_data_address (void)
741{
742 struct process_info *proc = current_process ();
743 CORE_ADDR addr;
df7e5265
GB
744 if (x86_dr_stopped_data_address (&proc->private->arch_private->debug_reg_state,
745 &addr))
aa5ca48f
DE
746 return addr;
747 return 0;
748}
749\f
750/* Called when a new process is created. */
751
752static struct arch_process_info *
753x86_linux_new_process (void)
754{
ed859da7 755 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 756
df7e5265 757 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
758
759 return info;
760}
761
762/* Called when a new thread is detected. */
763
764static struct arch_lwp_info *
765x86_linux_new_thread (void)
766{
ed859da7 767 struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);
aa5ca48f
DE
768
769 info->debug_registers_changed = 1;
770
771 return info;
772}
773
774/* Called when resuming a thread.
775 If the debug regs have changed, update the thread's copies. */
776
777static void
778x86_linux_prepare_to_resume (struct lwp_info *lwp)
779{
d86d4aaf 780 ptid_t ptid = ptid_of (get_lwp_thread (lwp));
6210a125 781 int clear_status = 0;
b9a881c2 782
aa5ca48f
DE
783 if (lwp->arch_private->debug_registers_changed)
784 {
785 int i;
aa5ca48f
DE
786 int pid = ptid_get_pid (ptid);
787 struct process_info *proc = find_process_pid (pid);
df7e5265 788 struct x86_debug_reg_state *state
493e2a69 789 = &proc->private->arch_private->debug_reg_state;
aa5ca48f 790
8e9db26e
PA
791 x86_linux_dr_set (ptid, DR_CONTROL, 0);
792
97ea6506 793 ALL_DEBUG_ADDRESS_REGISTERS (i)
6210a125
PA
794 if (state->dr_ref_count[i] > 0)
795 {
796 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
797
798 /* If we're setting a watchpoint, any change the inferior
799 had done itself to the debug registers needs to be
df7e5265 800 discarded, otherwise, x86_dr_stopped_data_address can
6210a125
PA
801 get confused. */
802 clear_status = 1;
803 }
aa5ca48f 804
8e9db26e
PA
805 if (state->dr_control_mirror != 0)
806 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
aa5ca48f
DE
807
808 lwp->arch_private->debug_registers_changed = 0;
809 }
b9a881c2 810
582511be 811 if (clear_status || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
b9a881c2 812 x86_linux_dr_set (ptid, DR_STATUS, 0);
aa5ca48f
DE
813}
814\f
d0722149
DE
815/* When GDBSERVER is built as a 64-bit application on linux, the
816 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
817 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
818 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
819 conversion in-place ourselves. */
820
821/* These types below (compat_*) define a siginfo type that is layout
822 compatible with the siginfo type exported by the 32-bit userspace
823 support. */
824
825#ifdef __x86_64__
826
827typedef int compat_int_t;
828typedef unsigned int compat_uptr_t;
829
830typedef int compat_time_t;
831typedef int compat_timer_t;
832typedef int compat_clock_t;
833
834struct compat_timeval
835{
836 compat_time_t tv_sec;
837 int tv_usec;
838};
839
840typedef union compat_sigval
841{
842 compat_int_t sival_int;
843 compat_uptr_t sival_ptr;
844} compat_sigval_t;
845
846typedef struct compat_siginfo
847{
848 int si_signo;
849 int si_errno;
850 int si_code;
851
852 union
853 {
854 int _pad[((128 / sizeof (int)) - 3)];
855
856 /* kill() */
857 struct
858 {
859 unsigned int _pid;
860 unsigned int _uid;
861 } _kill;
862
863 /* POSIX.1b timers */
864 struct
865 {
866 compat_timer_t _tid;
867 int _overrun;
868 compat_sigval_t _sigval;
869 } _timer;
870
871 /* POSIX.1b signals */
872 struct
873 {
874 unsigned int _pid;
875 unsigned int _uid;
876 compat_sigval_t _sigval;
877 } _rt;
878
879 /* SIGCHLD */
880 struct
881 {
882 unsigned int _pid;
883 unsigned int _uid;
884 int _status;
885 compat_clock_t _utime;
886 compat_clock_t _stime;
887 } _sigchld;
888
889 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
890 struct
891 {
892 unsigned int _addr;
893 } _sigfault;
894
895 /* SIGPOLL */
896 struct
897 {
898 int _band;
899 int _fd;
900 } _sigpoll;
901 } _sifields;
902} compat_siginfo_t;
903
c92b5177
L
904/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
905typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
906
907typedef struct compat_x32_siginfo
908{
909 int si_signo;
910 int si_errno;
911 int si_code;
912
913 union
914 {
915 int _pad[((128 / sizeof (int)) - 3)];
916
917 /* kill() */
918 struct
919 {
920 unsigned int _pid;
921 unsigned int _uid;
922 } _kill;
923
924 /* POSIX.1b timers */
925 struct
926 {
927 compat_timer_t _tid;
928 int _overrun;
929 compat_sigval_t _sigval;
930 } _timer;
931
932 /* POSIX.1b signals */
933 struct
934 {
935 unsigned int _pid;
936 unsigned int _uid;
937 compat_sigval_t _sigval;
938 } _rt;
939
940 /* SIGCHLD */
941 struct
942 {
943 unsigned int _pid;
944 unsigned int _uid;
945 int _status;
946 compat_x32_clock_t _utime;
947 compat_x32_clock_t _stime;
948 } _sigchld;
949
950 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
951 struct
952 {
953 unsigned int _addr;
954 } _sigfault;
955
956 /* SIGPOLL */
957 struct
958 {
959 int _band;
960 int _fd;
961 } _sigpoll;
962 } _sifields;
963} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
964
d0722149
DE
965#define cpt_si_pid _sifields._kill._pid
966#define cpt_si_uid _sifields._kill._uid
967#define cpt_si_timerid _sifields._timer._tid
968#define cpt_si_overrun _sifields._timer._overrun
969#define cpt_si_status _sifields._sigchld._status
970#define cpt_si_utime _sifields._sigchld._utime
971#define cpt_si_stime _sifields._sigchld._stime
972#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
973#define cpt_si_addr _sifields._sigfault._addr
974#define cpt_si_band _sifields._sigpoll._band
975#define cpt_si_fd _sifields._sigpoll._fd
976
977/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
978 In their place is si_timer1,si_timer2. */
979#ifndef si_timerid
980#define si_timerid si_timer1
981#endif
982#ifndef si_overrun
983#define si_overrun si_timer2
984#endif
985
986static void
987compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
988{
989 memset (to, 0, sizeof (*to));
990
991 to->si_signo = from->si_signo;
992 to->si_errno = from->si_errno;
993 to->si_code = from->si_code;
994
b53a1623 995 if (to->si_code == SI_TIMER)
d0722149 996 {
b53a1623
PA
997 to->cpt_si_timerid = from->si_timerid;
998 to->cpt_si_overrun = from->si_overrun;
d0722149
DE
999 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1000 }
1001 else if (to->si_code == SI_USER)
1002 {
1003 to->cpt_si_pid = from->si_pid;
1004 to->cpt_si_uid = from->si_uid;
1005 }
b53a1623 1006 else if (to->si_code < 0)
d0722149 1007 {
b53a1623
PA
1008 to->cpt_si_pid = from->si_pid;
1009 to->cpt_si_uid = from->si_uid;
d0722149
DE
1010 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1011 }
1012 else
1013 {
1014 switch (to->si_signo)
1015 {
1016 case SIGCHLD:
1017 to->cpt_si_pid = from->si_pid;
1018 to->cpt_si_uid = from->si_uid;
1019 to->cpt_si_status = from->si_status;
1020 to->cpt_si_utime = from->si_utime;
1021 to->cpt_si_stime = from->si_stime;
1022 break;
1023 case SIGILL:
1024 case SIGFPE:
1025 case SIGSEGV:
1026 case SIGBUS:
1027 to->cpt_si_addr = (intptr_t) from->si_addr;
1028 break;
1029 case SIGPOLL:
1030 to->cpt_si_band = from->si_band;
1031 to->cpt_si_fd = from->si_fd;
1032 break;
1033 default:
1034 to->cpt_si_pid = from->si_pid;
1035 to->cpt_si_uid = from->si_uid;
1036 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1037 break;
1038 }
1039 }
1040}
1041
1042static void
1043siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
1044{
1045 memset (to, 0, sizeof (*to));
1046
1047 to->si_signo = from->si_signo;
1048 to->si_errno = from->si_errno;
1049 to->si_code = from->si_code;
1050
b53a1623 1051 if (to->si_code == SI_TIMER)
d0722149 1052 {
b53a1623
PA
1053 to->si_timerid = from->cpt_si_timerid;
1054 to->si_overrun = from->cpt_si_overrun;
d0722149
DE
1055 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1056 }
1057 else if (to->si_code == SI_USER)
1058 {
1059 to->si_pid = from->cpt_si_pid;
1060 to->si_uid = from->cpt_si_uid;
1061 }
b53a1623 1062 else if (to->si_code < 0)
d0722149 1063 {
b53a1623
PA
1064 to->si_pid = from->cpt_si_pid;
1065 to->si_uid = from->cpt_si_uid;
d0722149
DE
1066 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1067 }
1068 else
1069 {
1070 switch (to->si_signo)
1071 {
1072 case SIGCHLD:
1073 to->si_pid = from->cpt_si_pid;
1074 to->si_uid = from->cpt_si_uid;
1075 to->si_status = from->cpt_si_status;
1076 to->si_utime = from->cpt_si_utime;
1077 to->si_stime = from->cpt_si_stime;
1078 break;
1079 case SIGILL:
1080 case SIGFPE:
1081 case SIGSEGV:
1082 case SIGBUS:
1083 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1084 break;
1085 case SIGPOLL:
1086 to->si_band = from->cpt_si_band;
1087 to->si_fd = from->cpt_si_fd;
1088 break;
1089 default:
1090 to->si_pid = from->cpt_si_pid;
1091 to->si_uid = from->cpt_si_uid;
1092 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1093 break;
1094 }
1095 }
1096}
1097
c92b5177
L
1098static void
1099compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1100 siginfo_t *from)
1101{
1102 memset (to, 0, sizeof (*to));
1103
1104 to->si_signo = from->si_signo;
1105 to->si_errno = from->si_errno;
1106 to->si_code = from->si_code;
1107
1108 if (to->si_code == SI_TIMER)
1109 {
1110 to->cpt_si_timerid = from->si_timerid;
1111 to->cpt_si_overrun = from->si_overrun;
1112 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1113 }
1114 else if (to->si_code == SI_USER)
1115 {
1116 to->cpt_si_pid = from->si_pid;
1117 to->cpt_si_uid = from->si_uid;
1118 }
1119 else if (to->si_code < 0)
1120 {
1121 to->cpt_si_pid = from->si_pid;
1122 to->cpt_si_uid = from->si_uid;
1123 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1124 }
1125 else
1126 {
1127 switch (to->si_signo)
1128 {
1129 case SIGCHLD:
1130 to->cpt_si_pid = from->si_pid;
1131 to->cpt_si_uid = from->si_uid;
1132 to->cpt_si_status = from->si_status;
1133 to->cpt_si_utime = from->si_utime;
1134 to->cpt_si_stime = from->si_stime;
1135 break;
1136 case SIGILL:
1137 case SIGFPE:
1138 case SIGSEGV:
1139 case SIGBUS:
1140 to->cpt_si_addr = (intptr_t) from->si_addr;
1141 break;
1142 case SIGPOLL:
1143 to->cpt_si_band = from->si_band;
1144 to->cpt_si_fd = from->si_fd;
1145 break;
1146 default:
1147 to->cpt_si_pid = from->si_pid;
1148 to->cpt_si_uid = from->si_uid;
1149 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1150 break;
1151 }
1152 }
1153}
1154
1155static void
1156siginfo_from_compat_x32_siginfo (siginfo_t *to,
1157 compat_x32_siginfo_t *from)
1158{
1159 memset (to, 0, sizeof (*to));
1160
1161 to->si_signo = from->si_signo;
1162 to->si_errno = from->si_errno;
1163 to->si_code = from->si_code;
1164
1165 if (to->si_code == SI_TIMER)
1166 {
1167 to->si_timerid = from->cpt_si_timerid;
1168 to->si_overrun = from->cpt_si_overrun;
1169 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1170 }
1171 else if (to->si_code == SI_USER)
1172 {
1173 to->si_pid = from->cpt_si_pid;
1174 to->si_uid = from->cpt_si_uid;
1175 }
1176 else if (to->si_code < 0)
1177 {
1178 to->si_pid = from->cpt_si_pid;
1179 to->si_uid = from->cpt_si_uid;
1180 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1181 }
1182 else
1183 {
1184 switch (to->si_signo)
1185 {
1186 case SIGCHLD:
1187 to->si_pid = from->cpt_si_pid;
1188 to->si_uid = from->cpt_si_uid;
1189 to->si_status = from->cpt_si_status;
1190 to->si_utime = from->cpt_si_utime;
1191 to->si_stime = from->cpt_si_stime;
1192 break;
1193 case SIGILL:
1194 case SIGFPE:
1195 case SIGSEGV:
1196 case SIGBUS:
1197 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1198 break;
1199 case SIGPOLL:
1200 to->si_band = from->cpt_si_band;
1201 to->si_fd = from->cpt_si_fd;
1202 break;
1203 default:
1204 to->si_pid = from->cpt_si_pid;
1205 to->si_uid = from->cpt_si_uid;
1206 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1207 break;
1208 }
1209 }
1210}
1211
d0722149
DE
1212#endif /* __x86_64__ */
1213
1214/* Convert a native/host siginfo object, into/from the siginfo in the
1215 layout of the inferiors' architecture. Returns true if any
1216 conversion was done; false otherwise. If DIRECTION is 1, then copy
1217 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1218 INF. */
1219
1220static int
a5362b9a 1221x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
d0722149
DE
1222{
1223#ifdef __x86_64__
760256f9 1224 unsigned int machine;
0bfdf32f 1225 int tid = lwpid_of (current_thread);
760256f9
PA
1226 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1227
d0722149 1228 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 1229 if (!is_64bit_tdesc ())
d0722149 1230 {
38e08fca 1231 gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));
d0722149
DE
1232
1233 if (direction == 0)
1234 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1235 else
1236 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1237
c92b5177
L
1238 return 1;
1239 }
1240 /* No fixup for native x32 GDB. */
760256f9 1241 else if (!is_elf64 && sizeof (void *) == 8)
c92b5177 1242 {
38e08fca 1243 gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));
c92b5177
L
1244
1245 if (direction == 0)
1246 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1247 native);
1248 else
1249 siginfo_from_compat_x32_siginfo (native,
1250 (struct compat_x32_siginfo *) inf);
1251
d0722149
DE
1252 return 1;
1253 }
1254#endif
1255
1256 return 0;
1257}
1258\f
1570b33e
L
1259static int use_xml;
1260
3aee8918
PA
1261/* Format of XSAVE extended state is:
1262 struct
1263 {
1264 fxsave_bytes[0..463]
1265 sw_usable_bytes[464..511]
1266 xstate_hdr_bytes[512..575]
1267 avx_bytes[576..831]
1268 future_state etc
1269 };
1270
1271 Same memory layout will be used for the coredump NT_X86_XSTATE
1272 representing the XSAVE extended state registers.
1273
1274 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1275 extended state mask, which is the same as the extended control register
1276 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1277 together with the mask saved in the xstate_hdr_bytes to determine what
1278 states the processor/OS supports and what state, used or initialized,
1279 the process/thread is in. */
1280#define I386_LINUX_XSAVE_XCR0_OFFSET 464
1281
1282/* Does the current host support the GETFPXREGS request? The header
1283 file may or may not define it, and even if it is defined, the
1284 kernel will return EIO if it's running on a pre-SSE processor. */
1285int have_ptrace_getfpxregs =
1286#ifdef HAVE_PTRACE_GETFPXREGS
1287 -1
1288#else
1289 0
1290#endif
1291;
1570b33e 1292
3aee8918
PA
1293/* Does the current host support PTRACE_GETREGSET? */
1294static int have_ptrace_getregset = -1;
1295
1296/* Get Linux/x86 target description from running target. */
1297
1298static const struct target_desc *
1299x86_linux_read_description (void)
1570b33e 1300{
3aee8918
PA
1301 unsigned int machine;
1302 int is_elf64;
a196ebeb 1303 int xcr0_features;
3aee8918
PA
1304 int tid;
1305 static uint64_t xcr0;
3a13a53b 1306 struct regset_info *regset;
1570b33e 1307
0bfdf32f 1308 tid = lwpid_of (current_thread);
1570b33e 1309
3aee8918 1310 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 1311
3aee8918 1312 if (sizeof (void *) == 4)
3a13a53b 1313 {
3aee8918
PA
1314 if (is_elf64 > 0)
1315 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1316#ifndef __x86_64__
1317 else if (machine == EM_X86_64)
1318 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1319#endif
1320 }
3a13a53b 1321
3aee8918
PA
1322#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1323 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1324 {
1325 elf_fpxregset_t fpxregs;
3a13a53b 1326
3aee8918 1327 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 1328 {
3aee8918
PA
1329 have_ptrace_getfpxregs = 0;
1330 have_ptrace_getregset = 0;
1331 return tdesc_i386_mmx_linux;
3a13a53b 1332 }
3aee8918
PA
1333 else
1334 have_ptrace_getfpxregs = 1;
3a13a53b 1335 }
1570b33e
L
1336#endif
1337
1338 if (!use_xml)
1339 {
df7e5265 1340 x86_xcr0 = X86_XSTATE_SSE_MASK;
3aee8918 1341
1570b33e
L
1342 /* Don't use XML. */
1343#ifdef __x86_64__
3aee8918
PA
1344 if (machine == EM_X86_64)
1345 return tdesc_amd64_linux_no_xml;
1570b33e 1346 else
1570b33e 1347#endif
3aee8918 1348 return tdesc_i386_linux_no_xml;
1570b33e
L
1349 }
1350
1570b33e
L
1351 if (have_ptrace_getregset == -1)
1352 {
df7e5265 1353 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 1354 struct iovec iov;
1570b33e
L
1355
1356 iov.iov_base = xstateregs;
1357 iov.iov_len = sizeof (xstateregs);
1358
1359 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
1360 if (ptrace (PTRACE_GETREGSET, tid,
1361 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1362 have_ptrace_getregset = 0;
1363 else
1570b33e 1364 {
3aee8918
PA
1365 have_ptrace_getregset = 1;
1366
1367 /* Get XCR0 from XSAVE extended state. */
1368 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1369 / sizeof (uint64_t))];
1370
1371 /* Use PTRACE_GETREGSET if it is available. */
1372 for (regset = x86_regsets;
1373 regset->fill_function != NULL; regset++)
1374 if (regset->get_request == PTRACE_GETREGSET)
df7e5265 1375 regset->size = X86_XSTATE_SIZE (xcr0);
3aee8918
PA
1376 else if (regset->type != GENERAL_REGS)
1377 regset->size = 0;
1570b33e 1378 }
1570b33e
L
1379 }
1380
3aee8918 1381 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb 1382 xcr0_features = (have_ptrace_getregset
df7e5265 1383 && (xcr0 & X86_XSTATE_ALL_MASK));
3aee8918 1384
a196ebeb 1385 if (xcr0_features)
3aee8918 1386 x86_xcr0 = xcr0;
1570b33e 1387
3aee8918
PA
1388 if (machine == EM_X86_64)
1389 {
1570b33e 1390#ifdef __x86_64__
a196ebeb 1391 if (is_elf64)
3aee8918 1392 {
a196ebeb
WT
1393 if (xcr0_features)
1394 {
df7e5265 1395 switch (xcr0 & X86_XSTATE_ALL_MASK)
a196ebeb 1396 {
df7e5265 1397 case X86_XSTATE_AVX512_MASK:
01f9f808
MS
1398 return tdesc_amd64_avx512_linux;
1399
df7e5265 1400 case X86_XSTATE_MPX_MASK:
a196ebeb
WT
1401 return tdesc_amd64_mpx_linux;
1402
df7e5265 1403 case X86_XSTATE_AVX_MASK:
a196ebeb
WT
1404 return tdesc_amd64_avx_linux;
1405
1406 default:
1407 return tdesc_amd64_linux;
1408 }
1409 }
4d47af5c 1410 else
a196ebeb 1411 return tdesc_amd64_linux;
3aee8918
PA
1412 }
1413 else
1414 {
a196ebeb
WT
1415 if (xcr0_features)
1416 {
df7e5265 1417 switch (xcr0 & X86_XSTATE_ALL_MASK)
a196ebeb 1418 {
df7e5265 1419 case X86_XSTATE_AVX512_MASK:
01f9f808
MS
1420 return tdesc_x32_avx512_linux;
1421
df7e5265
GB
1422 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
1423 case X86_XSTATE_AVX_MASK:
a196ebeb
WT
1424 return tdesc_x32_avx_linux;
1425
1426 default:
1427 return tdesc_x32_linux;
1428 }
1429 }
3aee8918 1430 else
a196ebeb 1431 return tdesc_x32_linux;
1570b33e 1432 }
3aee8918 1433#endif
1570b33e 1434 }
3aee8918
PA
1435 else
1436 {
a196ebeb
WT
1437 if (xcr0_features)
1438 {
df7e5265 1439 switch (xcr0 & X86_XSTATE_ALL_MASK)
a196ebeb 1440 {
df7e5265 1441 case (X86_XSTATE_AVX512_MASK):
01f9f808
MS
1442 return tdesc_i386_avx512_linux;
1443
df7e5265 1444 case (X86_XSTATE_MPX_MASK):
a196ebeb
WT
1445 return tdesc_i386_mpx_linux;
1446
df7e5265 1447 case (X86_XSTATE_AVX_MASK):
a196ebeb
WT
1448 return tdesc_i386_avx_linux;
1449
1450 default:
1451 return tdesc_i386_linux;
1452 }
1453 }
3aee8918
PA
1454 else
1455 return tdesc_i386_linux;
1456 }
1457
1458 gdb_assert_not_reached ("failed to return tdesc");
1459}
1460
1461/* Callback for find_inferior. Stops iteration when a thread with a
1462 given PID is found. */
1463
1464static int
1465same_process_callback (struct inferior_list_entry *entry, void *data)
1466{
1467 int pid = *(int *) data;
1468
1469 return (ptid_get_pid (entry->id) == pid);
1470}
1471
1472/* Callback for for_each_inferior. Calls the arch_setup routine for
1473 each process. */
1474
1475static void
1476x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1477{
1478 int pid = ptid_get_pid (entry->id);
1479
1480 /* Look up any thread of this processes. */
0bfdf32f 1481 current_thread
3aee8918
PA
1482 = (struct thread_info *) find_inferior (&all_threads,
1483 same_process_callback, &pid);
1484
1485 the_low_target.arch_setup ();
1486}
1487
1488/* Update all the target description of all processes; a new GDB
1489 connected, and it may or not support xml target descriptions. */
1490
1491static void
1492x86_linux_update_xmltarget (void)
1493{
0bfdf32f 1494 struct thread_info *saved_thread = current_thread;
3aee8918
PA
1495
1496 /* Before changing the register cache's internal layout, flush the
1497 contents of the current valid caches back to the threads, and
1498 release the current regcache objects. */
1499 regcache_release ();
1500
1501 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1502
0bfdf32f 1503 current_thread = saved_thread;
1570b33e
L
1504}
1505
1506/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1507 PTRACE_GETREGSET. */
1508
1509static void
1510x86_linux_process_qsupported (const char *query)
1511{
1512 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1513 with "i386" in qSupported query, it supports x86 XML target
1514 descriptions. */
1515 use_xml = 0;
1516 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1517 {
1518 char *copy = xstrdup (query + 13);
1519 char *p;
1520
1521 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1522 {
1523 if (strcmp (p, "i386") == 0)
1524 {
1525 use_xml = 1;
1526 break;
1527 }
1528 }
1529
1530 free (copy);
1531 }
1532
1533 x86_linux_update_xmltarget ();
1534}
1535
3aee8918 1536/* Common for x86/x86-64. */
d0722149 1537
3aee8918
PA
1538static struct regsets_info x86_regsets_info =
1539 {
1540 x86_regsets, /* regsets */
1541 0, /* num_regsets */
1542 NULL, /* disabled_regsets */
1543 };
214d508e
L
1544
1545#ifdef __x86_64__
3aee8918
PA
1546static struct regs_info amd64_linux_regs_info =
1547 {
1548 NULL, /* regset_bitmap */
1549 NULL, /* usrregs_info */
1550 &x86_regsets_info
1551 };
d0722149 1552#endif
3aee8918
PA
1553static struct usrregs_info i386_linux_usrregs_info =
1554 {
1555 I386_NUM_REGS,
1556 i386_regmap,
1557 };
d0722149 1558
3aee8918
PA
1559static struct regs_info i386_linux_regs_info =
1560 {
1561 NULL, /* regset_bitmap */
1562 &i386_linux_usrregs_info,
1563 &x86_regsets_info
1564 };
d0722149 1565
3aee8918
PA
1566const struct regs_info *
1567x86_linux_regs_info (void)
1568{
1569#ifdef __x86_64__
1570 if (is_64bit_tdesc ())
1571 return &amd64_linux_regs_info;
1572 else
1573#endif
1574 return &i386_linux_regs_info;
1575}
d0722149 1576
3aee8918
PA
1577/* Initialize the target description for the architecture of the
1578 inferior. */
1570b33e 1579
3aee8918
PA
1580static void
1581x86_arch_setup (void)
1582{
1583 current_process ()->tdesc = x86_linux_read_description ();
d0722149
DE
1584}
1585
219f2f23
PA
/* Tracepoints are supported on every x86/x86-64 GDBserver
   configuration.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1591
fa593d66
PA
1592static void
1593append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1594{
1595 write_inferior_memory (*to, buf, len);
1596 *to += len;
1597}
1598
/* Parse OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), and append the bytes to BUF.  Returns the
   number of bytes written.  BUF must be large enough; no bounds
   checking is performed.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *start = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* strtoul consumed nothing: no more hex values to read.  */
      if (end == op)
	break;

      *buf++ = byte;
      op = end;
    }

  return buf - start;
}
1618
1619#ifdef __x86_64__
1620
1621/* Build a jump pad that saves registers and calls a collection
1622 function. Writes a jump instruction to the jump pad to
1623 JJUMPAD_INSN. The caller is responsible to write it in at the
1624 tracepoint address. */
1625
1626static int
1627amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1628 CORE_ADDR collector,
1629 CORE_ADDR lockaddr,
1630 ULONGEST orig_size,
1631 CORE_ADDR *jump_entry,
405f8e94
SS
1632 CORE_ADDR *trampoline,
1633 ULONGEST *trampoline_size,
fa593d66
PA
1634 unsigned char *jjump_pad_insn,
1635 ULONGEST *jjump_pad_insn_size,
1636 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1637 CORE_ADDR *adjusted_insn_addr_end,
1638 char *err)
fa593d66
PA
1639{
1640 unsigned char buf[40];
1641 int i, offset;
f4647387
YQ
1642 int64_t loffset;
1643
fa593d66
PA
1644 CORE_ADDR buildaddr = *jump_entry;
1645
1646 /* Build the jump pad. */
1647
1648 /* First, do tracepoint data collection. Save registers. */
1649 i = 0;
1650 /* Need to ensure stack pointer saved first. */
1651 buf[i++] = 0x54; /* push %rsp */
1652 buf[i++] = 0x55; /* push %rbp */
1653 buf[i++] = 0x57; /* push %rdi */
1654 buf[i++] = 0x56; /* push %rsi */
1655 buf[i++] = 0x52; /* push %rdx */
1656 buf[i++] = 0x51; /* push %rcx */
1657 buf[i++] = 0x53; /* push %rbx */
1658 buf[i++] = 0x50; /* push %rax */
1659 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1660 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1661 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1662 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1663 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1664 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1665 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1666 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1667 buf[i++] = 0x9c; /* pushfq */
1668 buf[i++] = 0x48; /* movl <addr>,%rdi */
1669 buf[i++] = 0xbf;
1670 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1671 i += sizeof (unsigned long);
1672 buf[i++] = 0x57; /* push %rdi */
1673 append_insns (&buildaddr, i, buf);
1674
1675 /* Stack space for the collecting_t object. */
1676 i = 0;
1677 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1678 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1679 memcpy (buf + i, &tpoint, 8);
1680 i += 8;
1681 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1682 i += push_opcode (&buf[i],
1683 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1684 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1685 append_insns (&buildaddr, i, buf);
1686
1687 /* spin-lock. */
1688 i = 0;
1689 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1690 memcpy (&buf[i], (void *) &lockaddr, 8);
1691 i += 8;
1692 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1693 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1694 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1695 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1696 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1697 append_insns (&buildaddr, i, buf);
1698
1699 /* Set up the gdb_collect call. */
1700 /* At this point, (stack pointer + 0x18) is the base of our saved
1701 register block. */
1702
1703 i = 0;
1704 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1705 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1706
1707 /* tpoint address may be 64-bit wide. */
1708 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1709 memcpy (buf + i, &tpoint, 8);
1710 i += 8;
1711 append_insns (&buildaddr, i, buf);
1712
1713 /* The collector function being in the shared library, may be
1714 >31-bits away off the jump pad. */
1715 i = 0;
1716 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1717 memcpy (buf + i, &collector, 8);
1718 i += 8;
1719 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1720 append_insns (&buildaddr, i, buf);
1721
1722 /* Clear the spin-lock. */
1723 i = 0;
1724 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1725 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1726 memcpy (buf + i, &lockaddr, 8);
1727 i += 8;
1728 append_insns (&buildaddr, i, buf);
1729
1730 /* Remove stack that had been used for the collect_t object. */
1731 i = 0;
1732 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1733 append_insns (&buildaddr, i, buf);
1734
1735 /* Restore register state. */
1736 i = 0;
1737 buf[i++] = 0x48; /* add $0x8,%rsp */
1738 buf[i++] = 0x83;
1739 buf[i++] = 0xc4;
1740 buf[i++] = 0x08;
1741 buf[i++] = 0x9d; /* popfq */
1742 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1743 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1744 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1745 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1746 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1747 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1748 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1749 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1750 buf[i++] = 0x58; /* pop %rax */
1751 buf[i++] = 0x5b; /* pop %rbx */
1752 buf[i++] = 0x59; /* pop %rcx */
1753 buf[i++] = 0x5a; /* pop %rdx */
1754 buf[i++] = 0x5e; /* pop %rsi */
1755 buf[i++] = 0x5f; /* pop %rdi */
1756 buf[i++] = 0x5d; /* pop %rbp */
1757 buf[i++] = 0x5c; /* pop %rsp */
1758 append_insns (&buildaddr, i, buf);
1759
1760 /* Now, adjust the original instruction to execute in the jump
1761 pad. */
1762 *adjusted_insn_addr = buildaddr;
1763 relocate_instruction (&buildaddr, tpaddr);
1764 *adjusted_insn_addr_end = buildaddr;
1765
1766 /* Finally, write a jump back to the program. */
f4647387
YQ
1767
1768 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1769 if (loffset > INT_MAX || loffset < INT_MIN)
1770 {
1771 sprintf (err,
1772 "E.Jump back from jump pad too far from tracepoint "
1773 "(offset 0x%" PRIx64 " > int32).", loffset);
1774 return 1;
1775 }
1776
1777 offset = (int) loffset;
fa593d66
PA
1778 memcpy (buf, jump_insn, sizeof (jump_insn));
1779 memcpy (buf + 1, &offset, 4);
1780 append_insns (&buildaddr, sizeof (jump_insn), buf);
1781
1782 /* The jump pad is now built. Wire in a jump to our jump pad. This
1783 is always done last (by our caller actually), so that we can
1784 install fast tracepoints with threads running. This relies on
1785 the agent's atomic write support. */
f4647387
YQ
1786 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1787 if (loffset > INT_MAX || loffset < INT_MIN)
1788 {
1789 sprintf (err,
1790 "E.Jump pad too far from tracepoint "
1791 "(offset 0x%" PRIx64 " > int32).", loffset);
1792 return 1;
1793 }
1794
1795 offset = (int) loffset;
1796
fa593d66
PA
1797 memcpy (buf, jump_insn, sizeof (jump_insn));
1798 memcpy (buf + 1, &offset, 4);
1799 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1800 *jjump_pad_insn_size = sizeof (jump_insn);
1801
1802 /* Return the end address of our pad. */
1803 *jump_entry = buildaddr;
1804
1805 return 0;
1806}
1807
1808#endif /* __x86_64__ */
1809
1810/* Build a jump pad that saves registers and calls a collection
1811 function. Writes a jump instruction to the jump pad to
1812 JJUMPAD_INSN. The caller is responsible to write it in at the
1813 tracepoint address. */
1814
1815static int
1816i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1817 CORE_ADDR collector,
1818 CORE_ADDR lockaddr,
1819 ULONGEST orig_size,
1820 CORE_ADDR *jump_entry,
405f8e94
SS
1821 CORE_ADDR *trampoline,
1822 ULONGEST *trampoline_size,
fa593d66
PA
1823 unsigned char *jjump_pad_insn,
1824 ULONGEST *jjump_pad_insn_size,
1825 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1826 CORE_ADDR *adjusted_insn_addr_end,
1827 char *err)
fa593d66
PA
1828{
1829 unsigned char buf[0x100];
1830 int i, offset;
1831 CORE_ADDR buildaddr = *jump_entry;
1832
1833 /* Build the jump pad. */
1834
1835 /* First, do tracepoint data collection. Save registers. */
1836 i = 0;
1837 buf[i++] = 0x60; /* pushad */
1838 buf[i++] = 0x68; /* push tpaddr aka $pc */
1839 *((int *)(buf + i)) = (int) tpaddr;
1840 i += 4;
1841 buf[i++] = 0x9c; /* pushf */
1842 buf[i++] = 0x1e; /* push %ds */
1843 buf[i++] = 0x06; /* push %es */
1844 buf[i++] = 0x0f; /* push %fs */
1845 buf[i++] = 0xa0;
1846 buf[i++] = 0x0f; /* push %gs */
1847 buf[i++] = 0xa8;
1848 buf[i++] = 0x16; /* push %ss */
1849 buf[i++] = 0x0e; /* push %cs */
1850 append_insns (&buildaddr, i, buf);
1851
1852 /* Stack space for the collecting_t object. */
1853 i = 0;
1854 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1855
1856 /* Build the object. */
1857 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1858 memcpy (buf + i, &tpoint, 4);
1859 i += 4;
1860 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1861
1862 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1863 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1864 append_insns (&buildaddr, i, buf);
1865
1866 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1867 If we cared for it, this could be using xchg alternatively. */
1868
1869 i = 0;
1870 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1871 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1872 %esp,<lockaddr> */
1873 memcpy (&buf[i], (void *) &lockaddr, 4);
1874 i += 4;
1875 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1876 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1877 append_insns (&buildaddr, i, buf);
1878
1879
1880 /* Set up arguments to the gdb_collect call. */
1881 i = 0;
1882 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1883 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1884 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1885 append_insns (&buildaddr, i, buf);
1886
1887 i = 0;
1888 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1889 append_insns (&buildaddr, i, buf);
1890
1891 i = 0;
1892 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1893 memcpy (&buf[i], (void *) &tpoint, 4);
1894 i += 4;
1895 append_insns (&buildaddr, i, buf);
1896
1897 buf[0] = 0xe8; /* call <reladdr> */
1898 offset = collector - (buildaddr + sizeof (jump_insn));
1899 memcpy (buf + 1, &offset, 4);
1900 append_insns (&buildaddr, 5, buf);
1901 /* Clean up after the call. */
1902 buf[0] = 0x83; /* add $0x8,%esp */
1903 buf[1] = 0xc4;
1904 buf[2] = 0x08;
1905 append_insns (&buildaddr, 3, buf);
1906
1907
1908 /* Clear the spin-lock. This would need the LOCK prefix on older
1909 broken archs. */
1910 i = 0;
1911 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1912 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1913 memcpy (buf + i, &lockaddr, 4);
1914 i += 4;
1915 append_insns (&buildaddr, i, buf);
1916
1917
1918 /* Remove stack that had been used for the collect_t object. */
1919 i = 0;
1920 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1921 append_insns (&buildaddr, i, buf);
1922
1923 i = 0;
1924 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1925 buf[i++] = 0xc4;
1926 buf[i++] = 0x04;
1927 buf[i++] = 0x17; /* pop %ss */
1928 buf[i++] = 0x0f; /* pop %gs */
1929 buf[i++] = 0xa9;
1930 buf[i++] = 0x0f; /* pop %fs */
1931 buf[i++] = 0xa1;
1932 buf[i++] = 0x07; /* pop %es */
405f8e94 1933 buf[i++] = 0x1f; /* pop %ds */
fa593d66
PA
1934 buf[i++] = 0x9d; /* popf */
1935 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1936 buf[i++] = 0xc4;
1937 buf[i++] = 0x04;
1938 buf[i++] = 0x61; /* popad */
1939 append_insns (&buildaddr, i, buf);
1940
1941 /* Now, adjust the original instruction to execute in the jump
1942 pad. */
1943 *adjusted_insn_addr = buildaddr;
1944 relocate_instruction (&buildaddr, tpaddr);
1945 *adjusted_insn_addr_end = buildaddr;
1946
1947 /* Write the jump back to the program. */
1948 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1949 memcpy (buf, jump_insn, sizeof (jump_insn));
1950 memcpy (buf + 1, &offset, 4);
1951 append_insns (&buildaddr, sizeof (jump_insn), buf);
1952
1953 /* The jump pad is now built. Wire in a jump to our jump pad. This
1954 is always done last (by our caller actually), so that we can
1955 install fast tracepoints with threads running. This relies on
1956 the agent's atomic write support. */
405f8e94
SS
1957 if (orig_size == 4)
1958 {
1959 /* Create a trampoline. */
1960 *trampoline_size = sizeof (jump_insn);
1961 if (!claim_trampoline_space (*trampoline_size, trampoline))
1962 {
1963 /* No trampoline space available. */
1964 strcpy (err,
1965 "E.Cannot allocate trampoline space needed for fast "
1966 "tracepoints on 4-byte instructions.");
1967 return 1;
1968 }
1969
1970 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1971 memcpy (buf, jump_insn, sizeof (jump_insn));
1972 memcpy (buf + 1, &offset, 4);
1973 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1974
1975 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1976 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1977 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1978 memcpy (buf + 2, &offset, 2);
1979 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1980 *jjump_pad_insn_size = sizeof (small_jump_insn);
1981 }
1982 else
1983 {
1984 /* Else use a 32-bit relative jump instruction. */
1985 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1986 memcpy (buf, jump_insn, sizeof (jump_insn));
1987 memcpy (buf + 1, &offset, 4);
1988 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1989 *jjump_pad_insn_size = sizeof (jump_insn);
1990 }
fa593d66
PA
1991
1992 /* Return the end address of our pad. */
1993 *jump_entry = buildaddr;
1994
1995 return 0;
1996}
1997
1998static int
1999x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
2000 CORE_ADDR collector,
2001 CORE_ADDR lockaddr,
2002 ULONGEST orig_size,
2003 CORE_ADDR *jump_entry,
405f8e94
SS
2004 CORE_ADDR *trampoline,
2005 ULONGEST *trampoline_size,
fa593d66
PA
2006 unsigned char *jjump_pad_insn,
2007 ULONGEST *jjump_pad_insn_size,
2008 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
2009 CORE_ADDR *adjusted_insn_addr_end,
2010 char *err)
fa593d66
PA
2011{
2012#ifdef __x86_64__
3aee8918 2013 if (is_64bit_tdesc ())
fa593d66
PA
2014 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2015 collector, lockaddr,
2016 orig_size, jump_entry,
405f8e94 2017 trampoline, trampoline_size,
fa593d66
PA
2018 jjump_pad_insn,
2019 jjump_pad_insn_size,
2020 adjusted_insn_addr,
405f8e94
SS
2021 adjusted_insn_addr_end,
2022 err);
fa593d66
PA
2023#endif
2024
2025 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2026 collector, lockaddr,
2027 orig_size, jump_entry,
405f8e94 2028 trampoline, trampoline_size,
fa593d66
PA
2029 jjump_pad_insn,
2030 jjump_pad_insn_size,
2031 adjusted_insn_addr,
405f8e94
SS
2032 adjusted_insn_addr_end,
2033 err);
2034}
2035
2036/* Return the minimum instruction length for fast tracepoints on x86/x86-64
2037 architectures. */
2038
2039static int
2040x86_get_min_fast_tracepoint_insn_len (void)
2041{
2042 static int warned_about_fast_tracepoints = 0;
2043
2044#ifdef __x86_64__
2045 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2046 used for fast tracepoints. */
3aee8918 2047 if (is_64bit_tdesc ())
405f8e94
SS
2048 return 5;
2049#endif
2050
58b4daa5 2051 if (agent_loaded_p ())
405f8e94
SS
2052 {
2053 char errbuf[IPA_BUFSIZ];
2054
2055 errbuf[0] = '\0';
2056
2057 /* On x86, if trampolines are available, then 4-byte jump instructions
2058 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2059 with a 4-byte offset are used instead. */
2060 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2061 return 4;
2062 else
2063 {
2064 /* GDB has no channel to explain to user why a shorter fast
2065 tracepoint is not possible, but at least make GDBserver
2066 mention that something has gone awry. */
2067 if (!warned_about_fast_tracepoints)
2068 {
2069 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2070 warned_about_fast_tracepoints = 1;
2071 }
2072 return 5;
2073 }
2074 }
2075 else
2076 {
2077 /* Indicate that the minimum length is currently unknown since the IPA
2078 has not loaded yet. */
2079 return 0;
2080 }
fa593d66
PA
2081}
2082
6a271cae
PA
2083static void
2084add_insns (unsigned char *start, int len)
2085{
2086 CORE_ADDR buildaddr = current_insn_ptr;
2087
2088 if (debug_threads)
87ce2a04
DE
2089 debug_printf ("Adding %d bytes of insn at %s\n",
2090 len, paddress (buildaddr));
6a271cae
PA
2091
2092 append_insns (&buildaddr, len, start);
2093 current_insn_ptr = buildaddr;
2094}
2095
6a271cae
PA
2096/* Our general strategy for emitting code is to avoid specifying raw
2097 bytes whenever possible, and instead copy a block of inline asm
2098 that is embedded in the function. This is a little messy, because
2099 we need to keep the compiler from discarding what looks like dead
2100 code, plus suppress various warnings. */
2101
9e4344e5
PA
2102#define EMIT_ASM(NAME, INSNS) \
2103 do \
2104 { \
2105 extern unsigned char start_ ## NAME, end_ ## NAME; \
2106 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
493e2a69 2107 __asm__ ("jmp end_" #NAME "\n" \
9e4344e5
PA
2108 "\t" "start_" #NAME ":" \
2109 "\t" INSNS "\n" \
2110 "\t" "end_" #NAME ":"); \
2111 } while (0)
6a271cae
PA
2112
2113#ifdef __x86_64__
2114
2115#define EMIT_ASM32(NAME,INSNS) \
9e4344e5
PA
2116 do \
2117 { \
2118 extern unsigned char start_ ## NAME, end_ ## NAME; \
2119 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2120 __asm__ (".code32\n" \
2121 "\t" "jmp end_" #NAME "\n" \
2122 "\t" "start_" #NAME ":\n" \
2123 "\t" INSNS "\n" \
2124 "\t" "end_" #NAME ":\n" \
2125 ".code64\n"); \
2126 } while (0)
6a271cae
PA
2127
2128#else
2129
2130#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2131
2132#endif
2133
2134#ifdef __x86_64__
2135
2136static void
2137amd64_emit_prologue (void)
2138{
2139 EMIT_ASM (amd64_prologue,
2140 "pushq %rbp\n\t"
2141 "movq %rsp,%rbp\n\t"
2142 "sub $0x20,%rsp\n\t"
2143 "movq %rdi,-8(%rbp)\n\t"
2144 "movq %rsi,-16(%rbp)");
2145}
2146
2147
2148static void
2149amd64_emit_epilogue (void)
2150{
2151 EMIT_ASM (amd64_epilogue,
2152 "movq -16(%rbp),%rdi\n\t"
2153 "movq %rax,(%rdi)\n\t"
2154 "xor %rax,%rax\n\t"
2155 "leave\n\t"
2156 "ret");
2157}
2158
2159static void
2160amd64_emit_add (void)
2161{
2162 EMIT_ASM (amd64_add,
2163 "add (%rsp),%rax\n\t"
2164 "lea 0x8(%rsp),%rsp");
2165}
2166
2167static void
2168amd64_emit_sub (void)
2169{
2170 EMIT_ASM (amd64_sub,
2171 "sub %rax,(%rsp)\n\t"
2172 "pop %rax");
2173}
2174
2175static void
2176amd64_emit_mul (void)
2177{
2178 emit_error = 1;
2179}
2180
2181static void
2182amd64_emit_lsh (void)
2183{
2184 emit_error = 1;
2185}
2186
2187static void
2188amd64_emit_rsh_signed (void)
2189{
2190 emit_error = 1;
2191}
2192
2193static void
2194amd64_emit_rsh_unsigned (void)
2195{
2196 emit_error = 1;
2197}
2198
2199static void
2200amd64_emit_ext (int arg)
2201{
2202 switch (arg)
2203 {
2204 case 8:
2205 EMIT_ASM (amd64_ext_8,
2206 "cbtw\n\t"
2207 "cwtl\n\t"
2208 "cltq");
2209 break;
2210 case 16:
2211 EMIT_ASM (amd64_ext_16,
2212 "cwtl\n\t"
2213 "cltq");
2214 break;
2215 case 32:
2216 EMIT_ASM (amd64_ext_32,
2217 "cltq");
2218 break;
2219 default:
2220 emit_error = 1;
2221 }
2222}
2223
2224static void
2225amd64_emit_log_not (void)
2226{
2227 EMIT_ASM (amd64_log_not,
2228 "test %rax,%rax\n\t"
2229 "sete %cl\n\t"
2230 "movzbq %cl,%rax");
2231}
2232
2233static void
2234amd64_emit_bit_and (void)
2235{
2236 EMIT_ASM (amd64_and,
2237 "and (%rsp),%rax\n\t"
2238 "lea 0x8(%rsp),%rsp");
2239}
2240
2241static void
2242amd64_emit_bit_or (void)
2243{
2244 EMIT_ASM (amd64_or,
2245 "or (%rsp),%rax\n\t"
2246 "lea 0x8(%rsp),%rsp");
2247}
2248
2249static void
2250amd64_emit_bit_xor (void)
2251{
2252 EMIT_ASM (amd64_xor,
2253 "xor (%rsp),%rax\n\t"
2254 "lea 0x8(%rsp),%rsp");
2255}
2256
2257static void
2258amd64_emit_bit_not (void)
2259{
2260 EMIT_ASM (amd64_bit_not,
2261 "xorq $0xffffffffffffffff,%rax");
2262}
2263
2264static void
2265amd64_emit_equal (void)
2266{
2267 EMIT_ASM (amd64_equal,
2268 "cmp %rax,(%rsp)\n\t"
2269 "je .Lamd64_equal_true\n\t"
2270 "xor %rax,%rax\n\t"
2271 "jmp .Lamd64_equal_end\n\t"
2272 ".Lamd64_equal_true:\n\t"
2273 "mov $0x1,%rax\n\t"
2274 ".Lamd64_equal_end:\n\t"
2275 "lea 0x8(%rsp),%rsp");
2276}
2277
2278static void
2279amd64_emit_less_signed (void)
2280{
2281 EMIT_ASM (amd64_less_signed,
2282 "cmp %rax,(%rsp)\n\t"
2283 "jl .Lamd64_less_signed_true\n\t"
2284 "xor %rax,%rax\n\t"
2285 "jmp .Lamd64_less_signed_end\n\t"
2286 ".Lamd64_less_signed_true:\n\t"
2287 "mov $1,%rax\n\t"
2288 ".Lamd64_less_signed_end:\n\t"
2289 "lea 0x8(%rsp),%rsp");
2290}
2291
2292static void
2293amd64_emit_less_unsigned (void)
2294{
2295 EMIT_ASM (amd64_less_unsigned,
2296 "cmp %rax,(%rsp)\n\t"
2297 "jb .Lamd64_less_unsigned_true\n\t"
2298 "xor %rax,%rax\n\t"
2299 "jmp .Lamd64_less_unsigned_end\n\t"
2300 ".Lamd64_less_unsigned_true:\n\t"
2301 "mov $1,%rax\n\t"
2302 ".Lamd64_less_unsigned_end:\n\t"
2303 "lea 0x8(%rsp),%rsp");
2304}
2305
2306static void
2307amd64_emit_ref (int size)
2308{
2309 switch (size)
2310 {
2311 case 1:
2312 EMIT_ASM (amd64_ref1,
2313 "movb (%rax),%al");
2314 break;
2315 case 2:
2316 EMIT_ASM (amd64_ref2,
2317 "movw (%rax),%ax");
2318 break;
2319 case 4:
2320 EMIT_ASM (amd64_ref4,
2321 "movl (%rax),%eax");
2322 break;
2323 case 8:
2324 EMIT_ASM (amd64_ref8,
2325 "movq (%rax),%rax");
2326 break;
2327 }
2328}
2329
2330static void
2331amd64_emit_if_goto (int *offset_p, int *size_p)
2332{
2333 EMIT_ASM (amd64_if_goto,
2334 "mov %rax,%rcx\n\t"
2335 "pop %rax\n\t"
2336 "cmp $0,%rcx\n\t"
2337 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2338 if (offset_p)
2339 *offset_p = 10;
2340 if (size_p)
2341 *size_p = 4;
2342}
2343
2344static void
2345amd64_emit_goto (int *offset_p, int *size_p)
2346{
2347 EMIT_ASM (amd64_goto,
2348 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2349 if (offset_p)
2350 *offset_p = 1;
2351 if (size_p)
2352 *size_p = 4;
2353}
2354
2355static void
2356amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2357{
2358 int diff = (to - (from + size));
2359 unsigned char buf[sizeof (int)];
2360
2361 if (size != 4)
2362 {
2363 emit_error = 1;
2364 return;
2365 }
2366
2367 memcpy (buf, &diff, sizeof (int));
2368 write_inferior_memory (from, buf, sizeof (int));
2369}
2370
2371static void
4e29fb54 2372amd64_emit_const (LONGEST num)
6a271cae
PA
2373{
2374 unsigned char buf[16];
2375 int i;
2376 CORE_ADDR buildaddr = current_insn_ptr;
2377
2378 i = 0;
2379 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 2380 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
2381 i += 8;
2382 append_insns (&buildaddr, i, buf);
2383 current_insn_ptr = buildaddr;
2384}
2385
2386static void
2387amd64_emit_call (CORE_ADDR fn)
2388{
2389 unsigned char buf[16];
2390 int i;
2391 CORE_ADDR buildaddr;
4e29fb54 2392 LONGEST offset64;
6a271cae
PA
2393
2394 /* The destination function being in the shared library, may be
2395 >31-bits away off the compiled code pad. */
2396
2397 buildaddr = current_insn_ptr;
2398
2399 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2400
2401 i = 0;
2402
2403 if (offset64 > INT_MAX || offset64 < INT_MIN)
2404 {
2405 /* Offset is too large for a call. Use callq, but that requires
2406 a register, so avoid it if possible. Use r10, since it is
2407 call-clobbered, we don't have to push/pop it. */
2408 buf[i++] = 0x48; /* mov $fn,%r10 */
2409 buf[i++] = 0xba;
2410 memcpy (buf + i, &fn, 8);
2411 i += 8;
2412 buf[i++] = 0xff; /* callq *%r10 */
2413 buf[i++] = 0xd2;
2414 }
2415 else
2416 {
2417 int offset32 = offset64; /* we know we can't overflow here. */
2418 memcpy (buf + i, &offset32, 4);
2419 i += 4;
2420 }
2421
2422 append_insns (&buildaddr, i, buf);
2423 current_insn_ptr = buildaddr;
2424}
2425
2426static void
2427amd64_emit_reg (int reg)
2428{
2429 unsigned char buf[16];
2430 int i;
2431 CORE_ADDR buildaddr;
2432
2433 /* Assume raw_regs is still in %rdi. */
2434 buildaddr = current_insn_ptr;
2435 i = 0;
2436 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 2437 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2438 i += 4;
2439 append_insns (&buildaddr, i, buf);
2440 current_insn_ptr = buildaddr;
2441 amd64_emit_call (get_raw_reg_func_addr ());
2442}
2443
2444static void
2445amd64_emit_pop (void)
2446{
2447 EMIT_ASM (amd64_pop,
2448 "pop %rax");
2449}
2450
2451static void
2452amd64_emit_stack_flush (void)
2453{
2454 EMIT_ASM (amd64_stack_flush,
2455 "push %rax");
2456}
2457
2458static void
2459amd64_emit_zero_ext (int arg)
2460{
2461 switch (arg)
2462 {
2463 case 8:
2464 EMIT_ASM (amd64_zero_ext_8,
2465 "and $0xff,%rax");
2466 break;
2467 case 16:
2468 EMIT_ASM (amd64_zero_ext_16,
2469 "and $0xffff,%rax");
2470 break;
2471 case 32:
2472 EMIT_ASM (amd64_zero_ext_32,
2473 "mov $0xffffffff,%rcx\n\t"
2474 "and %rcx,%rax");
2475 break;
2476 default:
2477 emit_error = 1;
2478 }
2479}
2480
2481static void
2482amd64_emit_swap (void)
2483{
2484 EMIT_ASM (amd64_swap,
2485 "mov %rax,%rcx\n\t"
2486 "pop %rax\n\t"
2487 "push %rcx");
2488}
2489
2490static void
2491amd64_emit_stack_adjust (int n)
2492{
2493 unsigned char buf[16];
2494 int i;
2495 CORE_ADDR buildaddr = current_insn_ptr;
2496
2497 i = 0;
2498 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2499 buf[i++] = 0x8d;
2500 buf[i++] = 0x64;
2501 buf[i++] = 0x24;
2502 /* This only handles adjustments up to 16, but we don't expect any more. */
2503 buf[i++] = n * 8;
2504 append_insns (&buildaddr, i, buf);
2505 current_insn_ptr = buildaddr;
2506}
2507
2508/* FN's prototype is `LONGEST(*fn)(int)'. */
2509
2510static void
2511amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2512{
2513 unsigned char buf[16];
2514 int i;
2515 CORE_ADDR buildaddr;
2516
2517 buildaddr = current_insn_ptr;
2518 i = 0;
2519 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2520 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2521 i += 4;
2522 append_insns (&buildaddr, i, buf);
2523 current_insn_ptr = buildaddr;
2524 amd64_emit_call (fn);
2525}
2526
4e29fb54 2527/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2528
2529static void
2530amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2531{
2532 unsigned char buf[16];
2533 int i;
2534 CORE_ADDR buildaddr;
2535
2536 buildaddr = current_insn_ptr;
2537 i = 0;
2538 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2539 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2540 i += 4;
2541 append_insns (&buildaddr, i, buf);
2542 current_insn_ptr = buildaddr;
2543 EMIT_ASM (amd64_void_call_2_a,
2544 /* Save away a copy of the stack top. */
2545 "push %rax\n\t"
2546 /* Also pass top as the second argument. */
2547 "mov %rax,%rsi");
2548 amd64_emit_call (fn);
2549 EMIT_ASM (amd64_void_call_2_b,
2550 /* Restore the stack top, %rax may have been trashed. */
2551 "pop %rax");
2552}
2553
6b9801d4
SS
2554void
2555amd64_emit_eq_goto (int *offset_p, int *size_p)
2556{
2557 EMIT_ASM (amd64_eq,
2558 "cmp %rax,(%rsp)\n\t"
2559 "jne .Lamd64_eq_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2561 "pop %rax\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_eq_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2566 "pop %rax");
2567
2568 if (offset_p)
2569 *offset_p = 13;
2570 if (size_p)
2571 *size_p = 4;
2572}
2573
2574void
2575amd64_emit_ne_goto (int *offset_p, int *size_p)
2576{
2577 EMIT_ASM (amd64_ne,
2578 "cmp %rax,(%rsp)\n\t"
2579 "je .Lamd64_ne_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2581 "pop %rax\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_ne_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax");
2587
2588 if (offset_p)
2589 *offset_p = 13;
2590 if (size_p)
2591 *size_p = 4;
2592}
2593
2594void
2595amd64_emit_lt_goto (int *offset_p, int *size_p)
2596{
2597 EMIT_ASM (amd64_lt,
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnl .Lamd64_lt_fallthru\n\t"
2600 "lea 0x8(%rsp),%rsp\n\t"
2601 "pop %rax\n\t"
2602 /* jmp, but don't trust the assembler to choose the right jump */
2603 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2604 ".Lamd64_lt_fallthru:\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2606 "pop %rax");
2607
2608 if (offset_p)
2609 *offset_p = 13;
2610 if (size_p)
2611 *size_p = 4;
2612}
2613
2614void
2615amd64_emit_le_goto (int *offset_p, int *size_p)
2616{
2617 EMIT_ASM (amd64_le,
2618 "cmp %rax,(%rsp)\n\t"
2619 "jnle .Lamd64_le_fallthru\n\t"
2620 "lea 0x8(%rsp),%rsp\n\t"
2621 "pop %rax\n\t"
2622 /* jmp, but don't trust the assembler to choose the right jump */
2623 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2624 ".Lamd64_le_fallthru:\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2626 "pop %rax");
2627
2628 if (offset_p)
2629 *offset_p = 13;
2630 if (size_p)
2631 *size_p = 4;
2632}
2633
2634void
2635amd64_emit_gt_goto (int *offset_p, int *size_p)
2636{
2637 EMIT_ASM (amd64_gt,
2638 "cmp %rax,(%rsp)\n\t"
2639 "jng .Lamd64_gt_fallthru\n\t"
2640 "lea 0x8(%rsp),%rsp\n\t"
2641 "pop %rax\n\t"
2642 /* jmp, but don't trust the assembler to choose the right jump */
2643 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2644 ".Lamd64_gt_fallthru:\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2646 "pop %rax");
2647
2648 if (offset_p)
2649 *offset_p = 13;
2650 if (size_p)
2651 *size_p = 4;
2652}
2653
2654void
2655amd64_emit_ge_goto (int *offset_p, int *size_p)
2656{
2657 EMIT_ASM (amd64_ge,
2658 "cmp %rax,(%rsp)\n\t"
2659 "jnge .Lamd64_ge_fallthru\n\t"
2660 ".Lamd64_ge_jump:\n\t"
2661 "lea 0x8(%rsp),%rsp\n\t"
2662 "pop %rax\n\t"
2663 /* jmp, but don't trust the assembler to choose the right jump */
2664 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2665 ".Lamd64_ge_fallthru:\n\t"
2666 "lea 0x8(%rsp),%rsp\n\t"
2667 "pop %rax");
2668
2669 if (offset_p)
2670 *offset_p = 13;
2671 if (size_p)
2672 *size_p = 4;
2673}
2674
6a271cae
PA
2675struct emit_ops amd64_emit_ops =
2676 {
2677 amd64_emit_prologue,
2678 amd64_emit_epilogue,
2679 amd64_emit_add,
2680 amd64_emit_sub,
2681 amd64_emit_mul,
2682 amd64_emit_lsh,
2683 amd64_emit_rsh_signed,
2684 amd64_emit_rsh_unsigned,
2685 amd64_emit_ext,
2686 amd64_emit_log_not,
2687 amd64_emit_bit_and,
2688 amd64_emit_bit_or,
2689 amd64_emit_bit_xor,
2690 amd64_emit_bit_not,
2691 amd64_emit_equal,
2692 amd64_emit_less_signed,
2693 amd64_emit_less_unsigned,
2694 amd64_emit_ref,
2695 amd64_emit_if_goto,
2696 amd64_emit_goto,
2697 amd64_write_goto_address,
2698 amd64_emit_const,
2699 amd64_emit_call,
2700 amd64_emit_reg,
2701 amd64_emit_pop,
2702 amd64_emit_stack_flush,
2703 amd64_emit_zero_ext,
2704 amd64_emit_swap,
2705 amd64_emit_stack_adjust,
2706 amd64_emit_int_call_1,
6b9801d4
SS
2707 amd64_emit_void_call_2,
2708 amd64_emit_eq_goto,
2709 amd64_emit_ne_goto,
2710 amd64_emit_lt_goto,
2711 amd64_emit_le_goto,
2712 amd64_emit_gt_goto,
2713 amd64_emit_ge_goto
6a271cae
PA
2714 };
2715
2716#endif /* __x86_64__ */
2717
2718static void
2719i386_emit_prologue (void)
2720{
2721 EMIT_ASM32 (i386_prologue,
2722 "push %ebp\n\t"
bf15cbda
SS
2723 "mov %esp,%ebp\n\t"
2724 "push %ebx");
6a271cae
PA
2725 /* At this point, the raw regs base address is at 8(%ebp), and the
2726 value pointer is at 12(%ebp). */
2727}
2728
2729static void
2730i386_emit_epilogue (void)
2731{
2732 EMIT_ASM32 (i386_epilogue,
2733 "mov 12(%ebp),%ecx\n\t"
2734 "mov %eax,(%ecx)\n\t"
2735 "mov %ebx,0x4(%ecx)\n\t"
2736 "xor %eax,%eax\n\t"
bf15cbda 2737 "pop %ebx\n\t"
6a271cae
PA
2738 "pop %ebp\n\t"
2739 "ret");
2740}
2741
2742static void
2743i386_emit_add (void)
2744{
2745 EMIT_ASM32 (i386_add,
2746 "add (%esp),%eax\n\t"
2747 "adc 0x4(%esp),%ebx\n\t"
2748 "lea 0x8(%esp),%esp");
2749}
2750
2751static void
2752i386_emit_sub (void)
2753{
2754 EMIT_ASM32 (i386_sub,
2755 "subl %eax,(%esp)\n\t"
2756 "sbbl %ebx,4(%esp)\n\t"
2757 "pop %eax\n\t"
2758 "pop %ebx\n\t");
2759}
2760
2761static void
2762i386_emit_mul (void)
2763{
2764 emit_error = 1;
2765}
2766
2767static void
2768i386_emit_lsh (void)
2769{
2770 emit_error = 1;
2771}
2772
2773static void
2774i386_emit_rsh_signed (void)
2775{
2776 emit_error = 1;
2777}
2778
2779static void
2780i386_emit_rsh_unsigned (void)
2781{
2782 emit_error = 1;
2783}
2784
2785static void
2786i386_emit_ext (int arg)
2787{
2788 switch (arg)
2789 {
2790 case 8:
2791 EMIT_ASM32 (i386_ext_8,
2792 "cbtw\n\t"
2793 "cwtl\n\t"
2794 "movl %eax,%ebx\n\t"
2795 "sarl $31,%ebx");
2796 break;
2797 case 16:
2798 EMIT_ASM32 (i386_ext_16,
2799 "cwtl\n\t"
2800 "movl %eax,%ebx\n\t"
2801 "sarl $31,%ebx");
2802 break;
2803 case 32:
2804 EMIT_ASM32 (i386_ext_32,
2805 "movl %eax,%ebx\n\t"
2806 "sarl $31,%ebx");
2807 break;
2808 default:
2809 emit_error = 1;
2810 }
2811}
2812
2813static void
2814i386_emit_log_not (void)
2815{
2816 EMIT_ASM32 (i386_log_not,
2817 "or %ebx,%eax\n\t"
2818 "test %eax,%eax\n\t"
2819 "sete %cl\n\t"
2820 "xor %ebx,%ebx\n\t"
2821 "movzbl %cl,%eax");
2822}
2823
2824static void
2825i386_emit_bit_and (void)
2826{
2827 EMIT_ASM32 (i386_and,
2828 "and (%esp),%eax\n\t"
2829 "and 0x4(%esp),%ebx\n\t"
2830 "lea 0x8(%esp),%esp");
2831}
2832
2833static void
2834i386_emit_bit_or (void)
2835{
2836 EMIT_ASM32 (i386_or,
2837 "or (%esp),%eax\n\t"
2838 "or 0x4(%esp),%ebx\n\t"
2839 "lea 0x8(%esp),%esp");
2840}
2841
2842static void
2843i386_emit_bit_xor (void)
2844{
2845 EMIT_ASM32 (i386_xor,
2846 "xor (%esp),%eax\n\t"
2847 "xor 0x4(%esp),%ebx\n\t"
2848 "lea 0x8(%esp),%esp");
2849}
2850
2851static void
2852i386_emit_bit_not (void)
2853{
2854 EMIT_ASM32 (i386_bit_not,
2855 "xor $0xffffffff,%eax\n\t"
2856 "xor $0xffffffff,%ebx\n\t");
2857}
2858
2859static void
2860i386_emit_equal (void)
2861{
2862 EMIT_ASM32 (i386_equal,
2863 "cmpl %ebx,4(%esp)\n\t"
2864 "jne .Li386_equal_false\n\t"
2865 "cmpl %eax,(%esp)\n\t"
2866 "je .Li386_equal_true\n\t"
2867 ".Li386_equal_false:\n\t"
2868 "xor %eax,%eax\n\t"
2869 "jmp .Li386_equal_end\n\t"
2870 ".Li386_equal_true:\n\t"
2871 "mov $1,%eax\n\t"
2872 ".Li386_equal_end:\n\t"
2873 "xor %ebx,%ebx\n\t"
2874 "lea 0x8(%esp),%esp");
2875}
2876
2877static void
2878i386_emit_less_signed (void)
2879{
2880 EMIT_ASM32 (i386_less_signed,
2881 "cmpl %ebx,4(%esp)\n\t"
2882 "jl .Li386_less_signed_true\n\t"
2883 "jne .Li386_less_signed_false\n\t"
2884 "cmpl %eax,(%esp)\n\t"
2885 "jl .Li386_less_signed_true\n\t"
2886 ".Li386_less_signed_false:\n\t"
2887 "xor %eax,%eax\n\t"
2888 "jmp .Li386_less_signed_end\n\t"
2889 ".Li386_less_signed_true:\n\t"
2890 "mov $1,%eax\n\t"
2891 ".Li386_less_signed_end:\n\t"
2892 "xor %ebx,%ebx\n\t"
2893 "lea 0x8(%esp),%esp");
2894}
2895
2896static void
2897i386_emit_less_unsigned (void)
2898{
2899 EMIT_ASM32 (i386_less_unsigned,
2900 "cmpl %ebx,4(%esp)\n\t"
2901 "jb .Li386_less_unsigned_true\n\t"
2902 "jne .Li386_less_unsigned_false\n\t"
2903 "cmpl %eax,(%esp)\n\t"
2904 "jb .Li386_less_unsigned_true\n\t"
2905 ".Li386_less_unsigned_false:\n\t"
2906 "xor %eax,%eax\n\t"
2907 "jmp .Li386_less_unsigned_end\n\t"
2908 ".Li386_less_unsigned_true:\n\t"
2909 "mov $1,%eax\n\t"
2910 ".Li386_less_unsigned_end:\n\t"
2911 "xor %ebx,%ebx\n\t"
2912 "lea 0x8(%esp),%esp");
2913}
2914
2915static void
2916i386_emit_ref (int size)
2917{
2918 switch (size)
2919 {
2920 case 1:
2921 EMIT_ASM32 (i386_ref1,
2922 "movb (%eax),%al");
2923 break;
2924 case 2:
2925 EMIT_ASM32 (i386_ref2,
2926 "movw (%eax),%ax");
2927 break;
2928 case 4:
2929 EMIT_ASM32 (i386_ref4,
2930 "movl (%eax),%eax");
2931 break;
2932 case 8:
2933 EMIT_ASM32 (i386_ref8,
2934 "movl 4(%eax),%ebx\n\t"
2935 "movl (%eax),%eax");
2936 break;
2937 }
2938}
2939
2940static void
2941i386_emit_if_goto (int *offset_p, int *size_p)
2942{
2943 EMIT_ASM32 (i386_if_goto,
2944 "mov %eax,%ecx\n\t"
2945 "or %ebx,%ecx\n\t"
2946 "pop %eax\n\t"
2947 "pop %ebx\n\t"
2948 "cmpl $0,%ecx\n\t"
2949 /* Don't trust the assembler to choose the right jump */
2950 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2951
2952 if (offset_p)
2953 *offset_p = 11; /* be sure that this matches the sequence above */
2954 if (size_p)
2955 *size_p = 4;
2956}
2957
2958static void
2959i386_emit_goto (int *offset_p, int *size_p)
2960{
2961 EMIT_ASM32 (i386_goto,
2962 /* Don't trust the assembler to choose the right jump */
2963 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2964 if (offset_p)
2965 *offset_p = 1;
2966 if (size_p)
2967 *size_p = 4;
2968}
2969
2970static void
2971i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2972{
2973 int diff = (to - (from + size));
2974 unsigned char buf[sizeof (int)];
2975
2976 /* We're only doing 4-byte sizes at the moment. */
2977 if (size != 4)
2978 {
2979 emit_error = 1;
2980 return;
2981 }
2982
2983 memcpy (buf, &diff, sizeof (int));
2984 write_inferior_memory (from, buf, sizeof (int));
2985}
2986
2987static void
4e29fb54 2988i386_emit_const (LONGEST num)
6a271cae
PA
2989{
2990 unsigned char buf[16];
b00ad6ff 2991 int i, hi, lo;
6a271cae
PA
2992 CORE_ADDR buildaddr = current_insn_ptr;
2993
2994 i = 0;
2995 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2996 lo = num & 0xffffffff;
2997 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2998 i += 4;
2999 hi = ((num >> 32) & 0xffffffff);
3000 if (hi)
3001 {
3002 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 3003 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
3004 i += 4;
3005 }
3006 else
3007 {
3008 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3009 }
3010 append_insns (&buildaddr, i, buf);
3011 current_insn_ptr = buildaddr;
3012}
3013
3014static void
3015i386_emit_call (CORE_ADDR fn)
3016{
3017 unsigned char buf[16];
3018 int i, offset;
3019 CORE_ADDR buildaddr;
3020
3021 buildaddr = current_insn_ptr;
3022 i = 0;
3023 buf[i++] = 0xe8; /* call <reladdr> */
3024 offset = ((int) fn) - (buildaddr + 5);
3025 memcpy (buf + 1, &offset, 4);
3026 append_insns (&buildaddr, 5, buf);
3027 current_insn_ptr = buildaddr;
3028}
3029
3030static void
3031i386_emit_reg (int reg)
3032{
3033 unsigned char buf[16];
3034 int i;
3035 CORE_ADDR buildaddr;
3036
3037 EMIT_ASM32 (i386_reg_a,
3038 "sub $0x8,%esp");
3039 buildaddr = current_insn_ptr;
3040 i = 0;
3041 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff 3042 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
3043 i += 4;
3044 append_insns (&buildaddr, i, buf);
3045 current_insn_ptr = buildaddr;
3046 EMIT_ASM32 (i386_reg_b,
3047 "mov %eax,4(%esp)\n\t"
3048 "mov 8(%ebp),%eax\n\t"
3049 "mov %eax,(%esp)");
3050 i386_emit_call (get_raw_reg_func_addr ());
3051 EMIT_ASM32 (i386_reg_c,
3052 "xor %ebx,%ebx\n\t"
3053 "lea 0x8(%esp),%esp");
3054}
3055
3056static void
3057i386_emit_pop (void)
3058{
3059 EMIT_ASM32 (i386_pop,
3060 "pop %eax\n\t"
3061 "pop %ebx");
3062}
3063
3064static void
3065i386_emit_stack_flush (void)
3066{
3067 EMIT_ASM32 (i386_stack_flush,
3068 "push %ebx\n\t"
3069 "push %eax");
3070}
3071
3072static void
3073i386_emit_zero_ext (int arg)
3074{
3075 switch (arg)
3076 {
3077 case 8:
3078 EMIT_ASM32 (i386_zero_ext_8,
3079 "and $0xff,%eax\n\t"
3080 "xor %ebx,%ebx");
3081 break;
3082 case 16:
3083 EMIT_ASM32 (i386_zero_ext_16,
3084 "and $0xffff,%eax\n\t"
3085 "xor %ebx,%ebx");
3086 break;
3087 case 32:
3088 EMIT_ASM32 (i386_zero_ext_32,
3089 "xor %ebx,%ebx");
3090 break;
3091 default:
3092 emit_error = 1;
3093 }
3094}
3095
3096static void
3097i386_emit_swap (void)
3098{
3099 EMIT_ASM32 (i386_swap,
3100 "mov %eax,%ecx\n\t"
3101 "mov %ebx,%edx\n\t"
3102 "pop %eax\n\t"
3103 "pop %ebx\n\t"
3104 "push %edx\n\t"
3105 "push %ecx");
3106}
3107
3108static void
3109i386_emit_stack_adjust (int n)
3110{
3111 unsigned char buf[16];
3112 int i;
3113 CORE_ADDR buildaddr = current_insn_ptr;
3114
3115 i = 0;
3116 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3117 buf[i++] = 0x64;
3118 buf[i++] = 0x24;
3119 buf[i++] = n * 8;
3120 append_insns (&buildaddr, i, buf);
3121 current_insn_ptr = buildaddr;
3122}
3123
3124/* FN's prototype is `LONGEST(*fn)(int)'. */
3125
3126static void
3127i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3128{
3129 unsigned char buf[16];
3130 int i;
3131 CORE_ADDR buildaddr;
3132
3133 EMIT_ASM32 (i386_int_call_1_a,
3134 /* Reserve a bit of stack space. */
3135 "sub $0x8,%esp");
3136 /* Put the one argument on the stack. */
3137 buildaddr = current_insn_ptr;
3138 i = 0;
3139 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3140 buf[i++] = 0x04;
3141 buf[i++] = 0x24;
b00ad6ff 3142 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3143 i += 4;
3144 append_insns (&buildaddr, i, buf);
3145 current_insn_ptr = buildaddr;
3146 i386_emit_call (fn);
3147 EMIT_ASM32 (i386_int_call_1_c,
3148 "mov %edx,%ebx\n\t"
3149 "lea 0x8(%esp),%esp");
3150}
3151
4e29fb54 3152/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
3153
3154static void
3155i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3156{
3157 unsigned char buf[16];
3158 int i;
3159 CORE_ADDR buildaddr;
3160
3161 EMIT_ASM32 (i386_void_call_2_a,
3162 /* Preserve %eax only; we don't have to worry about %ebx. */
3163 "push %eax\n\t"
3164 /* Reserve a bit of stack space for arguments. */
3165 "sub $0x10,%esp\n\t"
3166 /* Copy "top" to the second argument position. (Note that
3167 we can't assume function won't scribble on its
3168 arguments, so don't try to restore from this.) */
3169 "mov %eax,4(%esp)\n\t"
3170 "mov %ebx,8(%esp)");
3171 /* Put the first argument on the stack. */
3172 buildaddr = current_insn_ptr;
3173 i = 0;
3174 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3175 buf[i++] = 0x04;
3176 buf[i++] = 0x24;
b00ad6ff 3177 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3178 i += 4;
3179 append_insns (&buildaddr, i, buf);
3180 current_insn_ptr = buildaddr;
3181 i386_emit_call (fn);
3182 EMIT_ASM32 (i386_void_call_2_b,
3183 "lea 0x10(%esp),%esp\n\t"
3184 /* Restore original stack top. */
3185 "pop %eax");
3186}
3187
6b9801d4
SS
3188
3189void
3190i386_emit_eq_goto (int *offset_p, int *size_p)
3191{
3192 EMIT_ASM32 (eq,
3193 /* Check low half first, more likely to be decider */
3194 "cmpl %eax,(%esp)\n\t"
3195 "jne .Leq_fallthru\n\t"
3196 "cmpl %ebx,4(%esp)\n\t"
3197 "jne .Leq_fallthru\n\t"
3198 "lea 0x8(%esp),%esp\n\t"
3199 "pop %eax\n\t"
3200 "pop %ebx\n\t"
3201 /* jmp, but don't trust the assembler to choose the right jump */
3202 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3203 ".Leq_fallthru:\n\t"
3204 "lea 0x8(%esp),%esp\n\t"
3205 "pop %eax\n\t"
3206 "pop %ebx");
3207
3208 if (offset_p)
3209 *offset_p = 18;
3210 if (size_p)
3211 *size_p = 4;
3212}
3213
3214void
3215i386_emit_ne_goto (int *offset_p, int *size_p)
3216{
3217 EMIT_ASM32 (ne,
3218 /* Check low half first, more likely to be decider */
3219 "cmpl %eax,(%esp)\n\t"
3220 "jne .Lne_jump\n\t"
3221 "cmpl %ebx,4(%esp)\n\t"
3222 "je .Lne_fallthru\n\t"
3223 ".Lne_jump:\n\t"
3224 "lea 0x8(%esp),%esp\n\t"
3225 "pop %eax\n\t"
3226 "pop %ebx\n\t"
3227 /* jmp, but don't trust the assembler to choose the right jump */
3228 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3229 ".Lne_fallthru:\n\t"
3230 "lea 0x8(%esp),%esp\n\t"
3231 "pop %eax\n\t"
3232 "pop %ebx");
3233
3234 if (offset_p)
3235 *offset_p = 18;
3236 if (size_p)
3237 *size_p = 4;
3238}
3239
3240void
3241i386_emit_lt_goto (int *offset_p, int *size_p)
3242{
3243 EMIT_ASM32 (lt,
3244 "cmpl %ebx,4(%esp)\n\t"
3245 "jl .Llt_jump\n\t"
3246 "jne .Llt_fallthru\n\t"
3247 "cmpl %eax,(%esp)\n\t"
3248 "jnl .Llt_fallthru\n\t"
3249 ".Llt_jump:\n\t"
3250 "lea 0x8(%esp),%esp\n\t"
3251 "pop %eax\n\t"
3252 "pop %ebx\n\t"
3253 /* jmp, but don't trust the assembler to choose the right jump */
3254 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3255 ".Llt_fallthru:\n\t"
3256 "lea 0x8(%esp),%esp\n\t"
3257 "pop %eax\n\t"
3258 "pop %ebx");
3259
3260 if (offset_p)
3261 *offset_p = 20;
3262 if (size_p)
3263 *size_p = 4;
3264}
3265
3266void
3267i386_emit_le_goto (int *offset_p, int *size_p)
3268{
3269 EMIT_ASM32 (le,
3270 "cmpl %ebx,4(%esp)\n\t"
3271 "jle .Lle_jump\n\t"
3272 "jne .Lle_fallthru\n\t"
3273 "cmpl %eax,(%esp)\n\t"
3274 "jnle .Lle_fallthru\n\t"
3275 ".Lle_jump:\n\t"
3276 "lea 0x8(%esp),%esp\n\t"
3277 "pop %eax\n\t"
3278 "pop %ebx\n\t"
3279 /* jmp, but don't trust the assembler to choose the right jump */
3280 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3281 ".Lle_fallthru:\n\t"
3282 "lea 0x8(%esp),%esp\n\t"
3283 "pop %eax\n\t"
3284 "pop %ebx");
3285
3286 if (offset_p)
3287 *offset_p = 20;
3288 if (size_p)
3289 *size_p = 4;
3290}
3291
3292void
3293i386_emit_gt_goto (int *offset_p, int *size_p)
3294{
3295 EMIT_ASM32 (gt,
3296 "cmpl %ebx,4(%esp)\n\t"
3297 "jg .Lgt_jump\n\t"
3298 "jne .Lgt_fallthru\n\t"
3299 "cmpl %eax,(%esp)\n\t"
3300 "jng .Lgt_fallthru\n\t"
3301 ".Lgt_jump:\n\t"
3302 "lea 0x8(%esp),%esp\n\t"
3303 "pop %eax\n\t"
3304 "pop %ebx\n\t"
3305 /* jmp, but don't trust the assembler to choose the right jump */
3306 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3307 ".Lgt_fallthru:\n\t"
3308 "lea 0x8(%esp),%esp\n\t"
3309 "pop %eax\n\t"
3310 "pop %ebx");
3311
3312 if (offset_p)
3313 *offset_p = 20;
3314 if (size_p)
3315 *size_p = 4;
3316}
3317
3318void
3319i386_emit_ge_goto (int *offset_p, int *size_p)
3320{
3321 EMIT_ASM32 (ge,
3322 "cmpl %ebx,4(%esp)\n\t"
3323 "jge .Lge_jump\n\t"
3324 "jne .Lge_fallthru\n\t"
3325 "cmpl %eax,(%esp)\n\t"
3326 "jnge .Lge_fallthru\n\t"
3327 ".Lge_jump:\n\t"
3328 "lea 0x8(%esp),%esp\n\t"
3329 "pop %eax\n\t"
3330 "pop %ebx\n\t"
3331 /* jmp, but don't trust the assembler to choose the right jump */
3332 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3333 ".Lge_fallthru:\n\t"
3334 "lea 0x8(%esp),%esp\n\t"
3335 "pop %eax\n\t"
3336 "pop %ebx");
3337
3338 if (offset_p)
3339 *offset_p = 20;
3340 if (size_p)
3341 *size_p = 4;
3342}
3343
6a271cae
PA
3344struct emit_ops i386_emit_ops =
3345 {
3346 i386_emit_prologue,
3347 i386_emit_epilogue,
3348 i386_emit_add,
3349 i386_emit_sub,
3350 i386_emit_mul,
3351 i386_emit_lsh,
3352 i386_emit_rsh_signed,
3353 i386_emit_rsh_unsigned,
3354 i386_emit_ext,
3355 i386_emit_log_not,
3356 i386_emit_bit_and,
3357 i386_emit_bit_or,
3358 i386_emit_bit_xor,
3359 i386_emit_bit_not,
3360 i386_emit_equal,
3361 i386_emit_less_signed,
3362 i386_emit_less_unsigned,
3363 i386_emit_ref,
3364 i386_emit_if_goto,
3365 i386_emit_goto,
3366 i386_write_goto_address,
3367 i386_emit_const,
3368 i386_emit_call,
3369 i386_emit_reg,
3370 i386_emit_pop,
3371 i386_emit_stack_flush,
3372 i386_emit_zero_ext,
3373 i386_emit_swap,
3374 i386_emit_stack_adjust,
3375 i386_emit_int_call_1,
6b9801d4
SS
3376 i386_emit_void_call_2,
3377 i386_emit_eq_goto,
3378 i386_emit_ne_goto,
3379 i386_emit_lt_goto,
3380 i386_emit_le_goto,
3381 i386_emit_gt_goto,
3382 i386_emit_ge_goto
6a271cae
PA
3383 };
3384
3385
3386static struct emit_ops *
3387x86_emit_ops (void)
3388{
3389#ifdef __x86_64__
3aee8918 3390 if (is_64bit_tdesc ())
6a271cae
PA
3391 return &amd64_emit_ops;
3392 else
3393#endif
3394 return &i386_emit_ops;
3395}
3396
c2d6af84
PA
/* Range stepping is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3402
d0722149
DE
3403/* This is initialized assuming an amd64 target.
3404 x86_arch_setup will correct it for i386 or amd64 targets. */
3405
3406struct linux_target_ops the_low_target =
3407{
3408 x86_arch_setup,
3aee8918
PA
3409 x86_linux_regs_info,
3410 x86_cannot_fetch_register,
3411 x86_cannot_store_register,
c14dfd32 3412 NULL, /* fetch_register */
d0722149
DE
3413 x86_get_pc,
3414 x86_set_pc,
3415 x86_breakpoint,
3416 x86_breakpoint_len,
3417 NULL,
3418 1,
3419 x86_breakpoint_at,
802e8e6d 3420 x86_supports_z_point_type,
aa5ca48f
DE
3421 x86_insert_point,
3422 x86_remove_point,
3423 x86_stopped_by_watchpoint,
3424 x86_stopped_data_address,
d0722149
DE
3425 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3426 native i386 case (no registers smaller than an xfer unit), and are not
3427 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3428 NULL,
3429 NULL,
3430 /* need to fix up i386 siginfo if host is amd64 */
3431 x86_siginfo_fixup,
aa5ca48f
DE
3432 x86_linux_new_process,
3433 x86_linux_new_thread,
1570b33e 3434 x86_linux_prepare_to_resume,
219f2f23 3435 x86_linux_process_qsupported,
fa593d66
PA
3436 x86_supports_tracepoints,
3437 x86_get_thread_area,
6a271cae 3438 x86_install_fast_tracepoint_jump_pad,
405f8e94
SS
3439 x86_emit_ops,
3440 x86_get_min_fast_tracepoint_insn_len,
c2d6af84 3441 x86_supports_range_stepping,
d0722149 3442};
3aee8918
PA
3443
3444void
3445initialize_low_arch (void)
3446{
3447 /* Initialize the Linux target descriptions. */
3448#ifdef __x86_64__
3449 init_registers_amd64_linux ();
3450 init_registers_amd64_avx_linux ();
01f9f808 3451 init_registers_amd64_avx512_linux ();
a196ebeb
WT
3452 init_registers_amd64_mpx_linux ();
3453
3aee8918 3454 init_registers_x32_linux ();
7e5aaa09 3455 init_registers_x32_avx_linux ();
01f9f808 3456 init_registers_x32_avx512_linux ();
3aee8918
PA
3457
3458 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3459 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3460 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3461#endif
3462 init_registers_i386_linux ();
3463 init_registers_i386_mmx_linux ();
3464 init_registers_i386_avx_linux ();
01f9f808 3465 init_registers_i386_avx512_linux ();
a196ebeb 3466 init_registers_i386_mpx_linux ();
3aee8918
PA
3467
3468 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3469 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3470 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3471
3472 initialize_regsets_info (&x86_regsets_info);
3473}
This page took 0.832454 seconds and 4 git commands to generate.