Fix gdbserver cross build.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
ecd75fc8 3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e 28#include "i386-xstate.h"
d0722149
DE
29
30#include "gdb_proc_service.h"
b5737fa9
PA
31/* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
33#ifndef ELFMAG0
34#include "elf/common.h"
35#endif
36
58b4daa5 37#include "agent.h"
3aee8918 38#include "tdesc.h"
c144c7a0 39#include "tracepoint.h"
f699aaba 40#include "ax.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
01f9f808
MS
51/* Defined in auto-generated file amd64-avx512-linux.c. */
52void init_registers_amd64_avx512_linux (void);
53extern const struct target_desc *tdesc_amd64_avx512_linux;
54
a196ebeb
WT
55/* Defined in auto-generated file amd64-mpx-linux.c. */
56void init_registers_amd64_mpx_linux (void);
57extern const struct target_desc *tdesc_amd64_mpx_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-linux.c. */
60void init_registers_x32_linux (void);
3aee8918
PA
61extern const struct target_desc *tdesc_x32_linux;
62
4d47af5c
L
63/* Defined in auto-generated file x32-avx-linux.c. */
64void init_registers_x32_avx_linux (void);
3aee8918 65extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 66
01f9f808
MS
67/* Defined in auto-generated file x32-avx512-linux.c. */
68void init_registers_x32_avx512_linux (void);
69extern const struct target_desc *tdesc_x32_avx512_linux;
70
3aee8918
PA
71#endif
72
73/* Defined in auto-generated file i386-linux.c. */
74void init_registers_i386_linux (void);
75extern const struct target_desc *tdesc_i386_linux;
76
77/* Defined in auto-generated file i386-mmx-linux.c. */
78void init_registers_i386_mmx_linux (void);
79extern const struct target_desc *tdesc_i386_mmx_linux;
80
81/* Defined in auto-generated file i386-avx-linux.c. */
82void init_registers_i386_avx_linux (void);
83extern const struct target_desc *tdesc_i386_avx_linux;
84
01f9f808
MS
85/* Defined in auto-generated file i386-avx512-linux.c. */
86void init_registers_i386_avx512_linux (void);
87extern const struct target_desc *tdesc_i386_avx512_linux;
88
a196ebeb
WT
89/* Defined in auto-generated file i386-mpx-linux.c. */
90void init_registers_i386_mpx_linux (void);
91extern const struct target_desc *tdesc_i386_mpx_linux;
92
3aee8918
PA
#ifdef __x86_64__
/* Fallback target descriptions used when the connected GDB reports no
   XML support (see the xmltarget_* strings below).  */
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


/* Template instructions for jumps patched into the inferior.
   jump_insn is a near jump: opcode 0xe9 followed by a rel32
   displacement (filled in at patch time).  small_jump_insn adds the
   0x66 operand-size prefix for a rel16 displacement.  Presumably used
   by the fast tracepoint code later in this file -- verify at the use
   sites.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
115
116#include <sys/reg.h>
117#include <sys/procfs.h>
118#include <sys/ptrace.h>
1570b33e
L
119#include <sys/uio.h>
120
121#ifndef PTRACE_GETREGSET
122#define PTRACE_GETREGSET 0x4204
123#endif
124
125#ifndef PTRACE_SETREGSET
126#define PTRACE_SETREGSET 0x4205
127#endif
128
d0722149
DE
129
130#ifndef PTRACE_GET_THREAD_AREA
131#define PTRACE_GET_THREAD_AREA 25
132#endif
133
134/* This definition comes from prctl.h, but some kernels may not have it. */
135#ifndef PTRACE_ARCH_PRCTL
136#define PTRACE_ARCH_PRCTL 30
137#endif
138
139/* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
141#ifndef ARCH_GET_FS
142#define ARCH_SET_GS 0x1001
143#define ARCH_SET_FS 0x1002
144#define ARCH_GET_FS 0x1003
145#define ARCH_GET_GS 0x1004
146#endif
147
aa5ca48f
DE
148/* Per-process arch-specific data we want to keep. */
149
150struct arch_process_info
151{
152 struct i386_debug_reg_state debug_reg_state;
153};
154
155/* Per-thread arch-specific data we want to keep. */
156
157struct arch_lwp_info
158{
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed;
161};
162
d0722149
DE
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* Mapping from GDB register numbers to `struct user' offsets for the
   64-bit tdesc.  Entries of -1 are registers that are not transferred
   through PTRACE_GETREGS/SETREGS (x86_fill_gregset and
   x86_store_gregset skip them); those are handled by other regsets
   (FP/SSE/xstate) or are not fetchable at all.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
/* Number of registers that live in the `struct user' area (GS is the
   last one); used to size the zero-fill for 32-bit inferiors.  */
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
3aee8918
PA
226
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  An 8-byte register 0 means a 64-bit description;
   4 bytes means i386 (or x32's 32-bit view of the GPRs).  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_inferior, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
241
d0722149
DE
242\f
/* Called by libthread_db.  Store in *BASE the thread-local storage
   base of thread LWPID.  On a 64-bit inferior IDX selects the FS or
   GS segment and the base is read with PTRACE_ARCH_PRCTL; otherwise
   IDX is a GDT entry number and the base comes from the second word
   of the descriptor returned by PTRACE_GET_THREAD_AREA.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
283
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.
   Returns 0 on success (address stored in *ADDR), -1 on failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    /* The low 3 bits of a segment selector are RPL/TI; shifting them
       off leaves the descriptor table index.  */
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the segment base address field of the descriptor.  */
    *addr = desc[1];
    return 0;
  }
}
330
331
d0722149
DE
332\f
333static int
3aee8918 334x86_cannot_store_register (int regno)
d0722149 335{
3aee8918
PA
336#ifdef __x86_64__
337 if (is_64bit_tdesc ())
338 return 0;
339#endif
340
d0722149
DE
341 return regno >= I386_NUM_REGS;
342}
343
344static int
3aee8918 345x86_cannot_fetch_register (int regno)
d0722149 346{
3aee8918
PA
347#ifdef __x86_64__
348 if (is_64bit_tdesc ())
349 return 0;
350#endif
351
d0722149
DE
352 return regno >= I386_NUM_REGS;
353}
354
/* Fill the ptrace GP-register buffer BUF from REGCACHE, laying out
   registers per the active regmap.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  /* 8-byte register 0 means a 64-bit tdesc; use the 64-bit map and
     skip the entries marked -1 (handled by other regsets).  */
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax has no regmap slot; place it by its known offset.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
380
/* Store the ptrace GP-register buffer BUF into REGCACHE; inverse of
   x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
402
/* Fill the ptrace FP-register buffer BUF from REGCACHE.  64-bit
   PTRACE_SETFPREGS takes the FXSAVE layout; 32-bit takes the older
   FSAVE layout.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
412
/* Store the ptrace FP-register buffer BUF into REGCACHE; inverse of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
422
423#ifndef __x86_64__
424
/* Fill the PTRACE_SETFPXREGS buffer BUF (FXSAVE layout) from
   REGCACHE.  i386-only: 64-bit gets FXSAVE via the plain fpregset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
430
/* Store the PTRACE_GETFPXREGS buffer BUF into REGCACHE; inverse of
   x86_fill_fpxregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
436
437#endif
438
1570b33e
L
/* Fill the PTRACE_SETREGSET NT_X86_XSTATE buffer BUF (XSAVE layout)
   from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
444
/* Store the PTRACE_GETREGSET NT_X86_XSTATE buffer BUF into REGCACHE;
   inverse of x86_fill_xstateregset.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
450
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

/* Table of the register sets gdbserver transfers via ptrace, each
   pairing get/set requests with the fill/store conversion routines
   above.  The xstate entry uses the NT_X86_XSTATE note type and a
   size of 0 (see struct regset_info in linux-low.h for the field
   layout).  Terminated by a zeroed sentinel entry.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
479
/* Return the program counter from REGCACHE, reading "rip" or "eip"
   according to the tdesc's register width.  */

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
498
499static void
442ea881 500x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 501{
3aee8918 502 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
503
504 if (use_64bit)
505 {
506 unsigned long newpc = pc;
442ea881 507 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
508 }
509 else
510 {
511 unsigned int newpc = pc;
442ea881 512 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
513 }
514}
515\f
516static const unsigned char x86_breakpoint[] = { 0xCC };
517#define x86_breakpoint_len 1
518
519static int
520x86_breakpoint_at (CORE_ADDR pc)
521{
522 unsigned char c;
523
fc7238bb 524 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
525 if (c == 0xCC)
526 return 1;
527
528 return 0;
529}
530\f
aa5ca48f
DE
/* Support for debug registers.  */

/* Read debug register REGNUM of the LWP in PTID through
   PTRACE_PEEKUSER and return its value.  Calls error () if the
   ptrace call fails.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  /* PTRACE_PEEKUSER returns the data in-band, so clearing and then
     checking errno is the only way to detect failure.  */
  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
549
/* Write VALUE to debug register REGNUM of the LWP in PTID through
   PTRACE_POKEUSER.  Calls error () if the ptrace call fails.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
563
964e4306
PA
/* find_inferior callback: if thread ENTRY belongs to process *PID_P,
   flag its lwp as needing a debug-register refresh and pause it if it
   is running so the refresh can be applied before it next resumes.
   Always returns 0 so the iteration visits every thread.  */

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
587
aa5ca48f
DE
/* Update the inferior's debug register REGNUM from STATE.  The write
   is deferred: every thread of the current process is flagged (and
   paused if running) so x86_linux_prepare_to_resume pushes the new
   mirror values just before resuming each one.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}
aa5ca48f 601
964e4306 602/* Return the inferior's debug register REGNUM. */
aa5ca48f 603
964e4306
PA
604CORE_ADDR
605i386_dr_low_get_addr (int regnum)
606{
d86d4aaf 607 ptid_t ptid = ptid_of (current_inferior);
964e4306
PA
608
609 /* DR6 and DR7 are retrieved with some other way. */
0a5b1e09 610 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
611
612 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
613}
614
/* Update the inferior's DR7 debug control register from STATE.  Like
   i386_dr_low_set_addr, the write is deferred to
   x86_linux_prepare_to_resume via the per-thread changed flag.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}
aa5ca48f 625
964e4306
PA
/* Return the inferior's DR7 debug control register, read directly
   from the current thread with ptrace.  */

unsigned
i386_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}
635
/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
646\f
90d74c30 647/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
648
649static int
802e8e6d
PA
650x86_supports_z_point_type (char z_type)
651{
652 switch (z_type)
653 {
654 case Z_PACKET_SW_BP:
655 case Z_PACKET_HW_BP:
656 case Z_PACKET_WRITE_WP:
657 case Z_PACKET_ACCESS_WP:
658 return 1;
659 default:
660 return 0;
661 }
662}
663
/* Insert a breakpoint or watchpoint of TYPE at ADDR covering SIZE
   bytes.  Software breakpoints are written to memory immediately;
   hardware break/watchpoints only update the process's debug-register
   mirror, which is flushed to each thread on resume.  Returns 0 on
   success; 1 for an unsupported TYPE (other failures propagate from
   the helpers).  */

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct i386_debug_reg_state *state
	  = &proc->private->arch_private->debug_reg_state;

	return i386_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
692
/* Remove a breakpoint or watchpoint of TYPE at ADDR covering SIZE
   bytes; inverse of x86_insert_point.  Returns 0 on success; 1 for an
   unsupported TYPE.  */

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct i386_debug_reg_state *state
	  = &proc->private->arch_private->debug_reg_state;

	return i386_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
720
721static int
722x86_stopped_by_watchpoint (void)
723{
724 struct process_info *proc = current_process ();
4be83cc2 725 return i386_dr_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
aa5ca48f
DE
726}
727
728static CORE_ADDR
729x86_stopped_data_address (void)
730{
731 struct process_info *proc = current_process ();
732 CORE_ADDR addr;
4be83cc2
GB
733 if (i386_dr_stopped_data_address (&proc->private->arch_private->debug_reg_state,
734 &addr))
aa5ca48f
DE
735 return addr;
736 return 0;
737}
738\f
/* Called when a new process is created.  Allocate the per-process
   arch data and reset its debug-register mirror.  Caller owns the
   returned memory.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}
750
/* Called when a new thread is detected.  Allocate the per-thread arch
   data, marked dirty so the thread picks up the process's current
   debug registers on its first resume.  Caller owns the returned
   memory.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}
762
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      /* Push each mirrored address register that is in use down to
	 the thread.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_dr_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  /* Reset DR6 so a stale hit is not reported on the next stop.  */
  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
800\f
d0722149
DE
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;	/* 32-bit pointers are ints.  */

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad the union so the whole siginfo occupies 128 bytes: three
       header ints plus this padding.  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

/* x32 variant of the above: identical except for the 64-bit clock_t
   fields in _sigchld and the 8-byte overall alignment.  */
typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

/* Shorthand accessors for the union members, mirroring the kernel's
   si_* convenience macros.  These expand against either compat
   struct, since both use the same member names.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
971
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO.  Which union members are live depends on si_code:
   SI_TIMER carries timer id/overrun/value, SI_USER carries pid/uid, a
   negative si_code (signal queued from userspace) carries
   pid/uid/value, and otherwise we dispatch on the signal number.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Pointers are truncated to the 32-bit address space.  */
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1027
/* Convert the 32-bit compat siginfo FROM into the native 64-bit
   siginfo TO; inverse of compat_siginfo_from_siginfo, with the same
   si_code-driven selection of live union members.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* 32-bit addresses zero-extend into the 64-bit pointer.  */
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1083
c92b5177
L
/* Convert the native 64-bit siginfo FROM into the x32 compat layout
   TO.  Identical in structure to compat_siginfo_from_siginfo; only
   the destination type (and its 64-bit clock_t fields) differs.  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1140
/* Convert the x32 compat siginfo FROM into the native layout TO.
   Inverse of compat_x32_siginfo_from_siginfo: only the fields selected
   by si_code/si_signo are copied; the rest of TO is zeroed.  Compat
   32-bit pointer values are widened back through intptr_t.  */

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      /* POSIX timer expiration: timer id, overrun count and sigval.  */
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      /* kill/raise: only the sender's pid/uid are meaningful.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* NOTE(review): negative si_code is assumed to mean a queued
	 signal carrying a sigval — confirm against the kernel's
	 siginfo conventions.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      /* Kernel-generated siginfo: the union layout depends on which
	 signal this is.  */
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Fault signals carry the faulting address.  */
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1197
d0722149
DE
1198#endif /* __x86_64__ */
1199
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_inferior);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      /* ptrace transfers a fixed-size siginfo blob; refuse to run if
	 our compat struct does not match the native size.  */
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* x32 inferior debugged by a 64-bit GDBserver: use the x32
	 compat layout.  */
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
1246\f
1570b33e
L
/* Nonzero if GDB announced x86 XML target description support via the
   qSupported "xmlRegisters=" list; set in
   x86_linux_process_qsupported.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "unknown, probe on first use"; see
   x86_linux_read_description.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means "not yet
   probed"; see x86_linux_read_description.  */
static int have_ptrace_getregset = -1;
1283
/* Get Linux/x86 target description from running target.

   Probes the inferior (ELF class, machine) and the kernel (GETFPXREGS,
   PTRACE_GETREGSET/XSAVE, XCR0 feature bits) and returns the matching
   pre-built target description.  Errors out when the inferior's
   bitness cannot be handled by this GDBserver build.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* Static: XCR0 is read from the kernel only once (on the first call
     that finds PTRACE_GETREGSET working) and cached across calls.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_inferior);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit GDBserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe PTRACE_GETFPXREGS once; a pre-SSE kernel/processor fails it,
     in which case only the MMX description can be used.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* GDB does not understand XML target descriptions: fall back to
	 the fixed SSE descriptions.  */
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Resize the
	     XSTATE regset to the kernel's layout and disable the
	     non-general regsets it supersedes.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = I386_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  /* Full 64-bit inferior.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & I386_XSTATE_ALL_MASK)
		{
		case I386_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case I386_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case I386_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  /* x32 inferior (EM_X86_64 machine, 32-bit ELF class).  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & I386_XSTATE_ALL_MASK)
		{
		case I386_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case I386_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      /* 32-bit i386 inferior.  */
      if (xcr0_features)
	{
	  switch (xcr0 & I386_XSTATE_ALL_MASK)
	    {
	    case (I386_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (I386_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (I386_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1448
1449/* Callback for find_inferior. Stops iteration when a thread with a
1450 given PID is found. */
1451
1452static int
1453same_process_callback (struct inferior_list_entry *entry, void *data)
1454{
1455 int pid = *(int *) data;
1456
1457 return (ptid_get_pid (entry->id) == pid);
1458}
1459
1460/* Callback for for_each_inferior. Calls the arch_setup routine for
1461 each process. */
1462
1463static void
1464x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1465{
1466 int pid = ptid_get_pid (entry->id);
1467
1468 /* Look up any thread of this processes. */
1469 current_inferior
1470 = (struct thread_info *) find_inferior (&all_threads,
1471 same_process_callback, &pid);
1472
1473 the_low_target.arch_setup ();
1474}
1475
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  /* Remember the current thread so it can be restored after the
     per-process callbacks have clobbered current_inferior.  */
  struct thread_info *save_inferior = current_inferior;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  /* Re-run arch setup (and thus tdesc selection) on every process.  */
  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_inferior = save_inferior;
}
1493
1494/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1495 PTRACE_GETREGSET. */
1496
1497static void
1498x86_linux_process_qsupported (const char *query)
1499{
1500 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1501 with "i386" in qSupported query, it supports x86 XML target
1502 descriptions. */
1503 use_xml = 0;
1504 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1505 {
1506 char *copy = xstrdup (query + 13);
1507 char *p;
1508
1509 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1510 {
1511 if (strcmp (p, "i386") == 0)
1512 {
1513 use_xml = 1;
1514 break;
1515 }
1516 }
1517
1518 free (copy);
1519 }
1520
1521 x86_linux_update_xmltarget ();
1522}
1523
/* Common for x86/x86-64.  */

/* Regset table shared by both register models; num_regsets and
   disabled_regsets are filled in at initialization time.  */
static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* amd64 uses regsets only (no usrregs table).  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
/* i386 additionally supports PTRACE_PEEKUSER-style access via the
   i386_regmap offsets.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1553
3aee8918
PA
1554const struct regs_info *
1555x86_linux_regs_info (void)
1556{
1557#ifdef __x86_64__
1558 if (is_64bit_tdesc ())
1559 return &amd64_linux_regs_info;
1560 else
1561#endif
1562 return &i386_linux_regs_info;
1563}
d0722149 1564
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  /* Probe the inferior/kernel and cache the selected description on
     the current process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1573
219f2f23
PA
/* Tracepoints are supported on both x86 and x86-64.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1579
fa593d66
PA
/* Write the LEN bytes at BUF into the inferior at *TO and advance *TO
   past them.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1586
/* Decode OP, a string of whitespace-separated hex byte values
   (e.g. "48 89 e6"), into BUF.  Decoding stops at the first token that
   does not parse as a number.  Returns the number of bytes stored.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out_start = buf;

  for (;;)
    {
      char *next;
      unsigned long byte = strtoul (op, &next, 16);

      /* No characters consumed: end of the opcode string.  */
      if (next == op)
	break;

      *buf++ = byte;
      op = next;
    }

  return buf - out_start;
}
1606
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the address of the tracepoint object; TPADDR the address
   of the original instruction; COLLECTOR the in-process agent's
   collection function; LOCKADDR the collecting_t spin-lock word.
   On success returns 0 and advances *JUMP_ENTRY past the pad; on
   failure returns 1 with an error message in ERR.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Loop until we own LOCKADDR.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  Mirrors the save sequence above.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1797
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   i386 flavor; see amd64_install_fast_tracepoint_jump_pad for the
   parameter contract.  Additionally supports 4-byte original
   instructions via a trampoline and a 16-bit relative jump.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state; mirrors the save sequence above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1985
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 worker above based on the bitness of the current process'
   target description.  Parameters are forwarded unchanged.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
2023
2024/* Return the minimum instruction length for fast tracepoints on x86/x86-64
2025 architectures. */
2026
2027static int
2028x86_get_min_fast_tracepoint_insn_len (void)
2029{
2030 static int warned_about_fast_tracepoints = 0;
2031
2032#ifdef __x86_64__
2033 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2034 used for fast tracepoints. */
3aee8918 2035 if (is_64bit_tdesc ())
405f8e94
SS
2036 return 5;
2037#endif
2038
58b4daa5 2039 if (agent_loaded_p ())
405f8e94
SS
2040 {
2041 char errbuf[IPA_BUFSIZ];
2042
2043 errbuf[0] = '\0';
2044
2045 /* On x86, if trampolines are available, then 4-byte jump instructions
2046 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2047 with a 4-byte offset are used instead. */
2048 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2049 return 4;
2050 else
2051 {
2052 /* GDB has no channel to explain to user why a shorter fast
2053 tracepoint is not possible, but at least make GDBserver
2054 mention that something has gone awry. */
2055 if (!warned_about_fast_tracepoints)
2056 {
2057 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2058 warned_about_fast_tracepoints = 1;
2059 }
2060 return 5;
2061 }
2062 }
2063 else
2064 {
2065 /* Indicate that the minimum length is currently unknown since the IPA
2066 has not loaded yet. */
2067 return 0;
2068 }
fa593d66
PA
2069}
2070
6a271cae
PA
2071static void
2072add_insns (unsigned char *start, int len)
2073{
2074 CORE_ADDR buildaddr = current_insn_ptr;
2075
2076 if (debug_threads)
87ce2a04
DE
2077 debug_printf ("Adding %d bytes of insn at %s\n",
2078 len, paddress (buildaddr));
6a271cae
PA
2079
2080 append_insns (&buildaddr, len, start);
2081 current_insn_ptr = buildaddr;
2082}
2083
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.

   The asm defines start_NAME/end_NAME labels around INSNS; the bytes
   between those two labels are then copied into the inferior via
   add_insns.  The leading "jmp end_NAME" keeps the host from ever
   executing the emitted instructions itself.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but the instructions are assembled in 32-bit mode
   (.code32) so a 64-bit GDBserver can emit code for a 32-bit
   inferior.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2121
2122#ifdef __x86_64__
2123
2124static void
2125amd64_emit_prologue (void)
2126{
2127 EMIT_ASM (amd64_prologue,
2128 "pushq %rbp\n\t"
2129 "movq %rsp,%rbp\n\t"
2130 "sub $0x20,%rsp\n\t"
2131 "movq %rdi,-8(%rbp)\n\t"
2132 "movq %rsi,-16(%rbp)");
2133}
2134
2135
2136static void
2137amd64_emit_epilogue (void)
2138{
2139 EMIT_ASM (amd64_epilogue,
2140 "movq -16(%rbp),%rdi\n\t"
2141 "movq %rax,(%rdi)\n\t"
2142 "xor %rax,%rax\n\t"
2143 "leave\n\t"
2144 "ret");
2145}
2146
2147static void
2148amd64_emit_add (void)
2149{
2150 EMIT_ASM (amd64_add,
2151 "add (%rsp),%rax\n\t"
2152 "lea 0x8(%rsp),%rsp");
2153}
2154
2155static void
2156amd64_emit_sub (void)
2157{
2158 EMIT_ASM (amd64_sub,
2159 "sub %rax,(%rsp)\n\t"
2160 "pop %rax");
2161}
2162
2163static void
2164amd64_emit_mul (void)
2165{
2166 emit_error = 1;
2167}
2168
2169static void
2170amd64_emit_lsh (void)
2171{
2172 emit_error = 1;
2173}
2174
2175static void
2176amd64_emit_rsh_signed (void)
2177{
2178 emit_error = 1;
2179}
2180
2181static void
2182amd64_emit_rsh_unsigned (void)
2183{
2184 emit_error = 1;
2185}
2186
/* Emit sign-extension of the low ARG bits of the top of stack to the
   full 64 bits, via the chain of byte->word->long->quad extensions.
   Unsupported widths report emit_error.  */
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
2211
/* Emit logical negation: top of stack becomes 1 if it was zero,
   else 0.  */
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit bitwise AND of the two top stack entries, popping one.  */
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise OR of the two top stack entries, popping one.  */
static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise XOR of the two top stack entries, popping one.  */
static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise complement of the top of stack (xor with all-ones).  */
static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
2251
/* Emit equality comparison of the two top stack entries; the boolean
   result (0/1) replaces them as the new top of stack.  */
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit signed "next-on-stack < top" comparison, result on top.  */
static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit unsigned "next-on-stack < top" comparison, result on top.  */
static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2293
/* Emit a SIZE-byte memory fetch: the top of stack (an address) is
   replaced by the value it points to.  Sub-quad loads leave the upper
   bits of %rax unchanged; callers follow up with ext/zero_ext.  */
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2317
/* Emit a conditional branch: pop the top of stack and jump if it was
   non-zero.  The jne is emitted as raw bytes (0f 85 + rel32) so the
   displacement size is fixed; *OFFSET_P/*SIZE_P tell the caller where
   the 4-byte displacement lives so write_goto_address can patch it.  */
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;		/* Byte offset of the rel32 field.  */
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp (raw e9 + rel32), to be patched later.  */
static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2342
2343static void
2344amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2345{
2346 int diff = (to - (from + size));
2347 unsigned char buf[sizeof (int)];
2348
2349 if (size != 4)
2350 {
2351 emit_error = 1;
2352 return;
2353 }
2354
2355 memcpy (buf, &diff, sizeof (int));
2356 write_inferior_memory (from, buf, sizeof (int));
2357}
2358
/* Emit code loading the 64-bit constant NUM into the top of stack
   (%rax), as a single movabs instruction.  */
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2373
2374static void
2375amd64_emit_call (CORE_ADDR fn)
2376{
2377 unsigned char buf[16];
2378 int i;
2379 CORE_ADDR buildaddr;
4e29fb54 2380 LONGEST offset64;
6a271cae
PA
2381
2382 /* The destination function being in the shared library, may be
2383 >31-bits away off the compiled code pad. */
2384
2385 buildaddr = current_insn_ptr;
2386
2387 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2388
2389 i = 0;
2390
2391 if (offset64 > INT_MAX || offset64 < INT_MIN)
2392 {
2393 /* Offset is too large for a call. Use callq, but that requires
2394 a register, so avoid it if possible. Use r10, since it is
2395 call-clobbered, we don't have to push/pop it. */
2396 buf[i++] = 0x48; /* mov $fn,%r10 */
2397 buf[i++] = 0xba;
2398 memcpy (buf + i, &fn, 8);
2399 i += 8;
2400 buf[i++] = 0xff; /* callq *%r10 */
2401 buf[i++] = 0xd2;
2402 }
2403 else
2404 {
2405 int offset32 = offset64; /* we know we can't overflow here. */
2406 memcpy (buf + i, &offset32, 4);
2407 i += 4;
2408 }
2409
2410 append_insns (&buildaddr, i, buf);
2411 current_insn_ptr = buildaddr;
2412}
2413
/* Emit code pushing the value of raw register REG: load the register
   number into %esi (second argument) and call the agent's raw-reg
   fetch routine, whose result lands in %rax (the top of stack).  */
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit a pop: discard the current top and reload it from memory.  */
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit a stack flush: copy the cached top of stack (%rax) onto the
   machine stack, making room for a new top.  */
static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
2445
/* Emit zero-extension of the low ARG bits of the top of stack,
   clearing everything above them.  */
static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* A 32-bit immediate won't do for "and"; build the mask in
	 %rcx first.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit a swap of the two top stack entries.  */
static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code dropping N entries (N*8 bytes) from the machine stack.  */
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2495
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call to FN with the
   integer ARG1 in %edi; FN's return value becomes the new top of
   stack (%rax).  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call to FN with
   ARG1 in %edi and the current top of stack in %rsi; the top of stack
   is preserved across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2541
/* Compare-and-branch emitters: compare the two top stack entries, pop
   both, and take the patched jump when the relation holds.  The jmp is
   emitted as raw bytes (e9 + rel32); *OFFSET_P = 13 is the byte offset
   of the rel32 field within the sequence (cmp 4 + jcc 2 + lea 5 +
   pop 1 + opcode 1), *SIZE_P its width.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* As amd64_emit_eq_goto, for "not equal".  */
void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2581
/* As amd64_emit_eq_goto, for signed "less than".  */
void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* As amd64_emit_eq_goto, for signed "less than or equal".  */
void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2621
/* As amd64_emit_eq_goto, for signed "greater than".  */
void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* As amd64_emit_eq_goto, for signed "greater than or equal".  */
void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2662
/* The amd64 bytecode-compiler vtable; entry order must match the
   struct emit_ops declaration (in a header not visible here --
   presumably tracepoint.h; verify before reordering).  */
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
2705
/* i386 prologue: build a frame and save callee-saved %ebx.  On i386
   the 64-bit expression stack top lives in the %ebx:%eax pair (high
   half in %ebx).  */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

/* i386 epilogue: store the 64-bit top of stack (%eax low, %ebx high)
   through the value pointer, return 0, restore %ebx and the frame.  */
static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2729
/* Emit 64-bit add of the two top entries (add/adc pair), popping one.  */
static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit subtract: next-on-stack -= top (sub/sbb), result
   becomes the new top.  */
static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2748
/* 64-bit multiply and shifts are not JITed on i386; setting
   emit_error falls back to the bytecode interpreter.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2772
/* Emit sign-extension of the low ARG bits of the top of stack to the
   full 64-bit %ebx:%eax pair (%ebx gets the sign bits).  */
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2800
/* Emit logical negation of the 64-bit top of stack: result is 1 if
   both halves were zero, else 0.  */
static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

/* Emit 64-bit bitwise AND of the two top entries, popping one.  */
static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise OR of the two top entries, popping one.  */
static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise XOR of the two top entries, popping one.  */
static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise complement of the top of stack.  */
static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2846
/* Emit 64-bit equality comparison of the two top entries; the boolean
   result replaces them (high half zeroed).  */
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit signed 64-bit "next < top": compare high halves first, then
   low halves (unsigned-wise) only if the highs are equal.  */
static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit unsigned 64-bit "next < top", same structure as above.  */
static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2902
/* Emit a SIZE-byte memory fetch through the address in %eax; the
   8-byte case fills the full %ebx:%eax pair.  Sub-word loads leave
   upper bits unchanged; callers follow up with ext/zero_ext.  */
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2927
/* Emit a conditional branch: pop the 64-bit top of stack and jump if
   either half was non-zero.  The jne is raw bytes (0f 85 + rel32);
   *OFFSET_P/*SIZE_P locate the patchable displacement.  */
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp (raw e9 + rel32), patched later.  */
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2957
2958static void
2959i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2960{
2961 int diff = (to - (from + size));
2962 unsigned char buf[sizeof (int)];
2963
2964 /* We're only doing 4-byte sizes at the moment. */
2965 if (size != 4)
2966 {
2967 emit_error = 1;
2968 return;
2969 }
2970
2971 memcpy (buf, &diff, sizeof (int));
2972 write_inferior_memory (from, buf, sizeof (int));
2973}
2974
/* Emit code loading the 64-bit constant NUM into the top of stack:
   low half into %eax, high half into %ebx (xor'ed clear when the high
   half is zero, saving four immediate bytes).  */
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
3001
/* Emit a pc-relative "call FN" (e8 + rel32).  On i386 every address
   is within 32-bit reach, so no indirect fallback is needed.  */
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  /* Displacement is relative to the end of the 5-byte call.  */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
3017
/* Emit code pushing raw register REG: pass the raw-regs base (at
   8(%ebp), see prologue) and REG on the stack to the agent's raw-reg
   fetch routine; its 32-bit result in %eax becomes the new top, with
   the high half (%ebx) zeroed.  */
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3043
/* Emit a pop: discard the current 64-bit top and reload %eax/%ebx
   from the machine stack.  */
static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

/* Emit a stack flush: push the cached 64-bit top (%ebx high word
   first) onto the machine stack.  */
static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
3059
/* Emit zero-extension of the low ARG bits of the 64-bit top of stack;
   the high half (%ebx) is always cleared.  */
static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit a swap of the two 64-bit top entries.  */
static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

/* Emit code dropping N 64-bit entries (N*8 bytes) from the stack.
   The 8-bit displacement limits N the same way as on amd64.  */
static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
3111
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call to FN with ARG1
   as its stack argument; the 64-bit return value (%edx:%eax per the
   32-bit ABI) becomes the new top of stack in %ebx:%eax.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3139
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call to FN with
   ARG1 and the current 64-bit top of stack as stack arguments; the
   top of stack is preserved across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
3175
/* 64-bit compare-and-branch emitters for i386: compare both halves of
   the two top entries, pop them, and take the patched raw jmp
   (e9 + rel32) when the relation holds.  *OFFSET_P is the byte offset
   of the rel32 field within the emitted sequence -- keep it in sync
   with the instruction bytes -- and *SIZE_P its width.  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

/* As i386_emit_eq_goto, for "not equal".  */
void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3227
/* As i386_emit_eq_goto, for signed "less than": decide on the high
   halves first, falling back to an unsigned low-half compare when the
   highs are equal.  */
void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* As i386_emit_lt_goto, for signed "less than or equal".  */
void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3279
/* As i386_emit_lt_goto, for signed "greater than".  */
void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* As i386_emit_lt_goto, for signed "greater than or equal".  */
void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3331
/* The i386 bytecode-compiler vtable; entry order must match the
   struct emit_ops declaration (in a header not visible here --
   presumably tracepoint.h; verify before reordering) and mirrors
   amd64_emit_ops above.  */
struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3372
3373
/* Target op: return the emit-ops vtable matching the current
   inferior's word size (amd64 for a 64-bit target description,
   i386 otherwise).  */
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3384
/* Target op: this target implements range stepping (the vCont "r"
   action), on both i386 and amd64.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3390
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.
   Entry order must match struct linux_target_ops in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL, /* breakpoint_reinsert_addr */
  1, /* decr_pc_after_break */
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
3aee8918
PA
3431
/* One-time arch setup: register every target description this build
   can serve, and create the no-XML fallback descriptions used when
   the client cannot process XML target descriptions.  */
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  /* Fallback for clients that can't read XML: a copy of the plain
     amd64 description with the canned xmltarget string.  */
  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();

  /* Same no-XML fallback for i386.  */
  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.896023 seconds and 4 git commands to generate.