New gdbserver option --debug-format=timestamp.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
ecd75fc8 3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e 28#include "i386-xstate.h"
d0722149
DE
29
30#include "gdb_proc_service.h"
b5737fa9
PA
31/* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
33#ifndef ELFMAG0
34#include "elf/common.h"
35#endif
36
58b4daa5 37#include "agent.h"
3aee8918 38#include "tdesc.h"
c144c7a0 39#include "tracepoint.h"
f699aaba 40#include "ax.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
a196ebeb
WT
51/* Defined in auto-generated file amd64-mpx-linux.c. */
52void init_registers_amd64_mpx_linux (void);
53extern const struct target_desc *tdesc_amd64_mpx_linux;
54
4d47af5c
L
55/* Defined in auto-generated file x32-linux.c. */
56void init_registers_x32_linux (void);
3aee8918
PA
57extern const struct target_desc *tdesc_x32_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-avx-linux.c. */
60void init_registers_x32_avx_linux (void);
3aee8918 61extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 62
3aee8918
PA
63#endif
64
65/* Defined in auto-generated file i386-linux.c. */
66void init_registers_i386_linux (void);
67extern const struct target_desc *tdesc_i386_linux;
68
69/* Defined in auto-generated file i386-mmx-linux.c. */
70void init_registers_i386_mmx_linux (void);
71extern const struct target_desc *tdesc_i386_mmx_linux;
72
73/* Defined in auto-generated file i386-avx-linux.c. */
74void init_registers_i386_avx_linux (void);
75extern const struct target_desc *tdesc_i386_avx_linux;
76
a196ebeb
WT
77/* Defined in auto-generated file i386-mpx-linux.c. */
78void init_registers_i386_mpx_linux (void);
79extern const struct target_desc *tdesc_i386_mpx_linux;
80
3aee8918
PA
/* Target descriptions used as a fallback when the connected GDB does
   not support XML target descriptions.  Presumably initialized during
   arch setup — not visible in this chunk; TODO confirm.  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


/* Instruction templates for planting jumps: a 5-byte near jump with a
   32-bit displacement (opcode 0xe9), and an operand-size-prefixed jump
   with a 16-bit displacement (0x66 0xe9).  Displacement bytes are
   patched in by the users of these templates.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
103
104#include <sys/reg.h>
105#include <sys/procfs.h>
106#include <sys/ptrace.h>
1570b33e
L
107#include <sys/uio.h>
108
109#ifndef PTRACE_GETREGSET
110#define PTRACE_GETREGSET 0x4204
111#endif
112
113#ifndef PTRACE_SETREGSET
114#define PTRACE_SETREGSET 0x4205
115#endif
116
d0722149
DE
117
118#ifndef PTRACE_GET_THREAD_AREA
119#define PTRACE_GET_THREAD_AREA 25
120#endif
121
122/* This definition comes from prctl.h, but some kernels may not have it. */
123#ifndef PTRACE_ARCH_PRCTL
124#define PTRACE_ARCH_PRCTL 30
125#endif
126
127/* The following definitions come from prctl.h, but may be absent
128 for certain configurations. */
129#ifndef ARCH_GET_FS
130#define ARCH_SET_GS 0x1001
131#define ARCH_SET_FS 0x1002
132#define ARCH_GET_FS 0x1003
133#define ARCH_GET_GS 0x1004
134#endif
135
aa5ca48f
DE
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Mirror of the debug register state we intend the process to have
     (addresses, control bits, reference counts); see i386-low.h.  */
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
150
d0722149
DE
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* Mapping for the full x86-64 register array.  An entry of -1 means
   the register is not part of the ptrace gregset transfer; the fill
   and store routines below skip such entries.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1			/* MPX registers BNDCFGU, BNDSTATUS.  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
3aee8918
PA
204
205#ifdef __x86_64__
206
207/* Returns true if the current inferior belongs to a x86-64 process,
208 per the tdesc. */
209
210static int
211is_64bit_tdesc (void)
212{
213 struct regcache *regcache = get_thread_regcache (current_inferior, 0);
214
215 return register_size (regcache->tdesc, 0) == 8;
216}
217
218#endif
219
d0722149
DE
220\f
/* Called by libthread_db.  Look up the thread-local storage base
   address of thread LWPID and store it in *BASE.  On 64-bit, IDX
   selects the FS or GS segment; on 32-bit, IDX is a GDT entry
   index for PTRACE_GET_THREAD_AREA.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    /* desc receives a struct user_desc; desc[1] is its base address
       field — presumably base_addr, per set_thread_area(2).  */
    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
261
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  Returns 0
   on success, -1 on failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* The GDT entry index is the %gs selector shifted right by 3
       (dropping the RPL/TI bits).  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] holds the segment base — presumably user_desc.base_addr;
       see set_thread_area(2).  */
    *addr = desc[1];
    return 0;
  }
}
307
308
d0722149
DE
309\f
/* Return non-zero if register REGNO cannot be written.  On 64-bit
   tdescs every register is writable; otherwise only the basic i386
   regmap registers are.  */

static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
320
/* Return non-zero if register REGNO cannot be read.  Mirrors
   x86_cannot_store_register.  */

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
331
/* Fill the ptrace gregset buffer BUF from REGCACHE, using the regmap
   appropriate to the inferior's word size.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: copy each mapped register; -1 entries are
	 not part of the gregset.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in the regmap; place it by its `struct user'
     offset.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
353
/* Store the ptrace gregset buffer BUF into REGCACHE; inverse of
   x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: -1 entries are not in the gregset.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
375
/* Fill the FP regset buffer BUF from REGCACHE (fxsave layout on
   64-bit, fsave layout on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
385
/* Store the FP regset buffer BUF into REGCACHE; inverse of
   x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
395
#ifndef __x86_64__

/* Fill the FPX (fxsave-layout) regset buffer BUF from REGCACHE.
   Only needed on 32-bit, where fsave and fxsave are distinct.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Store the FPX regset buffer BUF into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
411
1570b33e
L
/* Fill the XSAVE extended-state regset buffer BUF from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Store the XSAVE extended-state regset buffer BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
423
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

/* Table of ptrace regsets this target supports, terminated by an
   all-zero/NULL entry.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* XSAVE state; size 0 here — presumably determined at runtime,
     TODO confirm against regset handling code.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }	/* Terminator.  */
};
452
453static CORE_ADDR
442ea881 454x86_get_pc (struct regcache *regcache)
d0722149 455{
3aee8918 456 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
457
458 if (use_64bit)
459 {
460 unsigned long pc;
442ea881 461 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
462 return (CORE_ADDR) pc;
463 }
464 else
465 {
466 unsigned int pc;
442ea881 467 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
468 return (CORE_ADDR) pc;
469 }
470}
471
472static void
442ea881 473x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 474{
3aee8918 475 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
476
477 if (use_64bit)
478 {
479 unsigned long newpc = pc;
442ea881 480 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
481 }
482 else
483 {
484 unsigned int newpc = pc;
442ea881 485 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
486 }
487}
488\f
/* The INT3 instruction (0xCC), used as the software breakpoint.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
491
492static int
493x86_breakpoint_at (CORE_ADDR pc)
494{
495 unsigned char c;
496
fc7238bb 497 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
498 if (c == 0xCC)
499 return 1;
500
501 return 0;
502}
503\f
aa5ca48f
DE
/* Support for debug registers.  */

/* Read debug register REGNUM of the LWP in PTID via the ptrace user
   area.  Calls error () if the read fails.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  /* PTRACE_PEEKUSER returns the value in-band; failure is only
     detectable through errno, so clear it first.  */
  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
522
/* Write VALUE to debug register REGNUM of the LWP in PTID via the
   ptrace user area.  Calls error () if the write fails.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
536
964e4306
PA
/* find_inferior callback: mark the debug registers of ENTRY's lwp as
   needing an update if it belongs to process *PID_P, stopping the lwp
   if necessary so the update can happen.  Always returns 0 so the
   iteration visits every lwp.  */

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
559
aa5ca48f
DE
/* Update the inferior's debug register REGNUM from STATE.  The write
   is deferred: all threads of the current process are flagged (and
   paused if running) so the new value is installed just before each
   resumes; see x86_linux_prepare_to_resume.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 573
/* Return the inferior's debug register REGNUM, read directly from the
   current thread.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved with some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}
587
/* Update the inferior's DR7 debug control register from STATE.  As
   with i386_dr_low_set_addr, the write is deferred to each thread's
   next resume.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 598
964e4306
PA
/* Return the inferior's DR7 debug control register, read from the
   current thread.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}
609
/* Return the value of the DR6 debug status register, read from the
   current thread.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
621\f
/* Breakpoint/Watchpoint support.  */

/* Insert a break- or watchpoint of TYPE (a Z-packet type character)
   covering LEN bytes at ADDR.  Returns 0 on success, 1 if the type is
   unsupported, -1 on failure.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	/* Memory access may need the inferior prepared (e.g. threads
	   stopped); bracket the breakpoint write accordingly.  */
	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}
653
/* Remove a break- or watchpoint of TYPE covering LEN bytes at ADDR;
   inverse of x86_insert_point.  Returns 0 on success, 1 if the type
   is unsupported, -1 on failure.  */

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}
682
/* Return non-zero if the current process stopped because of a
   watchpoint hit, per its mirrored debug register state.  */

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}
689
/* Return the data address that triggered the last watchpoint stop,
   or 0 if none can be determined.  */

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}
700\f
/* Called when a new process is created.  Allocate and return the
   per-process arch data with a cleared debug register mirror; the
   caller owns the returned storage.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}
712
/* Called when a new thread is detected.  Allocate its arch data,
   marked so the debug registers get installed on first resume; the
   caller owns the returned storage.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}
724
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      /* Install each address register that is actually in use
	 (nonzero reference count) from the mirror.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      /* DR7 (control) is written last, after the addresses it
	 enables are in place.  */
      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
762\f
d0722149
DE
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* _pad sizes the union so the whole siginfo occupies 128 bytes,
       matching the kernel's fixed siginfo size.  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
851
c92b5177
L
/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

/* Layout-compatible siginfo for x32 inferiors; identical to
   compat_siginfo except for the 64-bit _utime/_stime fields and the
   8-byte overall alignment.  */
typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
912
d0722149
DE
/* Shorthand accessors for the compat siginfo union members, mirroring
   the kernel's si_* convenience macros.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
933
/* Convert the native siginfo FROM into the 32-bit compat layout TO.
   Which union members are meaningful depends on si_code and
   si_signo, so the copy is dispatched accordingly.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (e.g. sigqueue).  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      /* Kernel-generated signal: the payload layout follows the
	 signal number.  */
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
989
/* Convert the 32-bit compat siginfo FROM into the native layout TO;
   inverse of compat_siginfo_from_siginfo.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (e.g. sigqueue).  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      /* Kernel-generated signal: payload layout follows the signal
	 number.  */
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1045
c92b5177
L
/* Convert the native siginfo FROM into the x32 compat layout TO.
   Same dispatch logic as compat_siginfo_from_siginfo, but targeting
   the x32 struct (64-bit _utime/_stime).  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (e.g. sigqueue).  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1102
/* Convert the x32 compat siginfo FROM into the native layout TO;
   inverse of compat_x32_siginfo_from_siginfo.  */

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-generated signal (e.g. sigqueue).  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1159
d0722149
DE
1160#endif /* __x86_64__ */
1161
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (get_thread_lwp (current_inferior));
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      /* The conversions assume the two layouts are the same total
	 size; anything else means our compat structs are wrong.  */
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* x32 inferior (64-bit tdesc, 32-bit ELF) under a 64-bit
	 gdbserver: use the x32-specific layout.  */
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
1208\f
1570b33e
L
/* Non-zero if the connected GDB announced support for x86 XML target
   descriptions ("xmlRegisters=" containing "i386" in qSupported); set
   by x86_linux_process_qsupported below.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet"; x86_linux_read_description sets it to
   0 or 1 the first time it tries the request.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  Tri-state like
   have_ptrace_getfpxregs: -1 until probed.  */
static int have_ptrace_getregset = -1;
1245
/* Get Linux/x86 target description from running target.

   Probes the inferior (via ptrace) and the connected GDB's XML
   capability, caches the probe results in the have_ptrace_* globals,
   and returns the matching pre-built target description.  Also sets
   the global x86_xcr0 mask as a side effect.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* Static: XCR0 is read from the kernel only on the first call (when
     have_ptrace_getregset is still -1) and reused afterwards.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (get_thread_lwp (current_inferior));

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot handle a 64-bit (or, without __x86_64__,
     an x32) inferior at all; bail out early.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* First-time probe for PTRACE_GETFPXREGS (pre-SSE kernels/CPUs
     reject it); on failure fall back to the MMX-only description.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* First-time probe for PTRACE_GETREGSET/NT_X86_XSTATE; on success
     read XCR0 out of the XSAVE area and size the xstate regset to
     match it.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = I386_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  /* Pick the description matching the ABI (amd64/x32/i386) and the
     feature bits (MPX, AVX, or baseline) advertised by XCR0.  */
  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & I386_XSTATE_ALL_MASK)
	        {
		case I386_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case I386_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & I386_XSTATE_ALL_MASK)
	        {
		case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case I386_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & I386_XSTATE_ALL_MASK)
	    {
	    case (I386_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (I386_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1401
1402/* Callback for find_inferior. Stops iteration when a thread with a
1403 given PID is found. */
1404
1405static int
1406same_process_callback (struct inferior_list_entry *entry, void *data)
1407{
1408 int pid = *(int *) data;
1409
1410 return (ptid_get_pid (entry->id) == pid);
1411}
1412
1413/* Callback for for_each_inferior. Calls the arch_setup routine for
1414 each process. */
1415
1416static void
1417x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1418{
1419 int pid = ptid_get_pid (entry->id);
1420
1421 /* Look up any thread of this processes. */
1422 current_inferior
1423 = (struct thread_info *) find_inferior (&all_threads,
1424 same_process_callback, &pid);
1425
1426 the_low_target.arch_setup ();
1427}
1428
1429/* Update all the target description of all processes; a new GDB
1430 connected, and it may or not support xml target descriptions. */
1431
1432static void
1433x86_linux_update_xmltarget (void)
1434{
1435 struct thread_info *save_inferior = current_inferior;
1436
1437 /* Before changing the register cache's internal layout, flush the
1438 contents of the current valid caches back to the threads, and
1439 release the current regcache objects. */
1440 regcache_release ();
1441
1442 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1443
1444 current_inferior = save_inferior;
1570b33e
L
1445}
1446
1447/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1448 PTRACE_GETREGSET. */
1449
1450static void
1451x86_linux_process_qsupported (const char *query)
1452{
1453 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1454 with "i386" in qSupported query, it supports x86 XML target
1455 descriptions. */
1456 use_xml = 0;
1457 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1458 {
1459 char *copy = xstrdup (query + 13);
1460 char *p;
1461
1462 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1463 {
1464 if (strcmp (p, "i386") == 0)
1465 {
1466 use_xml = 1;
1467 break;
1468 }
1469 }
1470
1471 free (copy);
1472 }
1473
1474 x86_linux_update_xmltarget ();
1475}
1476
3aee8918 1477/* Common for x86/x86-64. */
d0722149 1478
3aee8918
PA
1479static struct regsets_info x86_regsets_info =
1480 {
1481 x86_regsets, /* regsets */
1482 0, /* num_regsets */
1483 NULL, /* disabled_regsets */
1484 };
214d508e
L
1485
1486#ifdef __x86_64__
3aee8918
PA
1487static struct regs_info amd64_linux_regs_info =
1488 {
1489 NULL, /* regset_bitmap */
1490 NULL, /* usrregs_info */
1491 &x86_regsets_info
1492 };
d0722149 1493#endif
3aee8918
PA
1494static struct usrregs_info i386_linux_usrregs_info =
1495 {
1496 I386_NUM_REGS,
1497 i386_regmap,
1498 };
d0722149 1499
3aee8918
PA
1500static struct regs_info i386_linux_regs_info =
1501 {
1502 NULL, /* regset_bitmap */
1503 &i386_linux_usrregs_info,
1504 &x86_regsets_info
1505 };
d0722149 1506
3aee8918
PA
1507const struct regs_info *
1508x86_linux_regs_info (void)
1509{
1510#ifdef __x86_64__
1511 if (is_64bit_tdesc ())
1512 return &amd64_linux_regs_info;
1513 else
1514#endif
1515 return &i386_linux_regs_info;
1516}
d0722149 1517
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  /* Probe the running inferior and cache the matching tdesc on the
     process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1526
219f2f23
PA
/* Tracepoints are always supported on x86/x86-64.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1532
fa593d66
PA
1533static void
1534append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1535{
1536 write_inferior_memory (*to, buf, len);
1537 *to += len;
1538}
1539
/* Decode OP, a string of whitespace-separated hex byte values, into
   BUF.  Returns the number of bytes stored.  Parsing stops at the
   first token that is not a hex number.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *cursor = op;

  for (;;)
    {
      char *after;
      unsigned long byte = strtoul (cursor, &after, 16);

      /* No progress means no more hex tokens.  */
      if (after == cursor)
	break;

      *out++ = byte;
      cursor = after;
    }

  return out - buf;
}
1559
1560#ifdef __x86_64__
1561
1562/* Build a jump pad that saves registers and calls a collection
1563 function. Writes a jump instruction to the jump pad to
1564 JJUMPAD_INSN. The caller is responsible to write it in at the
1565 tracepoint address. */
1566
1567static int
1568amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1569 CORE_ADDR collector,
1570 CORE_ADDR lockaddr,
1571 ULONGEST orig_size,
1572 CORE_ADDR *jump_entry,
405f8e94
SS
1573 CORE_ADDR *trampoline,
1574 ULONGEST *trampoline_size,
fa593d66
PA
1575 unsigned char *jjump_pad_insn,
1576 ULONGEST *jjump_pad_insn_size,
1577 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1578 CORE_ADDR *adjusted_insn_addr_end,
1579 char *err)
fa593d66
PA
1580{
1581 unsigned char buf[40];
1582 int i, offset;
f4647387
YQ
1583 int64_t loffset;
1584
fa593d66
PA
1585 CORE_ADDR buildaddr = *jump_entry;
1586
1587 /* Build the jump pad. */
1588
1589 /* First, do tracepoint data collection. Save registers. */
1590 i = 0;
1591 /* Need to ensure stack pointer saved first. */
1592 buf[i++] = 0x54; /* push %rsp */
1593 buf[i++] = 0x55; /* push %rbp */
1594 buf[i++] = 0x57; /* push %rdi */
1595 buf[i++] = 0x56; /* push %rsi */
1596 buf[i++] = 0x52; /* push %rdx */
1597 buf[i++] = 0x51; /* push %rcx */
1598 buf[i++] = 0x53; /* push %rbx */
1599 buf[i++] = 0x50; /* push %rax */
1600 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1601 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1602 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1603 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1604 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1605 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1606 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1607 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1608 buf[i++] = 0x9c; /* pushfq */
1609 buf[i++] = 0x48; /* movl <addr>,%rdi */
1610 buf[i++] = 0xbf;
1611 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1612 i += sizeof (unsigned long);
1613 buf[i++] = 0x57; /* push %rdi */
1614 append_insns (&buildaddr, i, buf);
1615
1616 /* Stack space for the collecting_t object. */
1617 i = 0;
1618 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1619 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1620 memcpy (buf + i, &tpoint, 8);
1621 i += 8;
1622 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1623 i += push_opcode (&buf[i],
1624 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1625 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1626 append_insns (&buildaddr, i, buf);
1627
1628 /* spin-lock. */
1629 i = 0;
1630 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1631 memcpy (&buf[i], (void *) &lockaddr, 8);
1632 i += 8;
1633 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1634 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1635 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1636 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1637 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1638 append_insns (&buildaddr, i, buf);
1639
1640 /* Set up the gdb_collect call. */
1641 /* At this point, (stack pointer + 0x18) is the base of our saved
1642 register block. */
1643
1644 i = 0;
1645 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1646 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1647
1648 /* tpoint address may be 64-bit wide. */
1649 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1650 memcpy (buf + i, &tpoint, 8);
1651 i += 8;
1652 append_insns (&buildaddr, i, buf);
1653
1654 /* The collector function being in the shared library, may be
1655 >31-bits away off the jump pad. */
1656 i = 0;
1657 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1658 memcpy (buf + i, &collector, 8);
1659 i += 8;
1660 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1661 append_insns (&buildaddr, i, buf);
1662
1663 /* Clear the spin-lock. */
1664 i = 0;
1665 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1666 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1667 memcpy (buf + i, &lockaddr, 8);
1668 i += 8;
1669 append_insns (&buildaddr, i, buf);
1670
1671 /* Remove stack that had been used for the collect_t object. */
1672 i = 0;
1673 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1674 append_insns (&buildaddr, i, buf);
1675
1676 /* Restore register state. */
1677 i = 0;
1678 buf[i++] = 0x48; /* add $0x8,%rsp */
1679 buf[i++] = 0x83;
1680 buf[i++] = 0xc4;
1681 buf[i++] = 0x08;
1682 buf[i++] = 0x9d; /* popfq */
1683 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1684 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1685 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1686 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1687 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1688 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1689 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1690 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1691 buf[i++] = 0x58; /* pop %rax */
1692 buf[i++] = 0x5b; /* pop %rbx */
1693 buf[i++] = 0x59; /* pop %rcx */
1694 buf[i++] = 0x5a; /* pop %rdx */
1695 buf[i++] = 0x5e; /* pop %rsi */
1696 buf[i++] = 0x5f; /* pop %rdi */
1697 buf[i++] = 0x5d; /* pop %rbp */
1698 buf[i++] = 0x5c; /* pop %rsp */
1699 append_insns (&buildaddr, i, buf);
1700
1701 /* Now, adjust the original instruction to execute in the jump
1702 pad. */
1703 *adjusted_insn_addr = buildaddr;
1704 relocate_instruction (&buildaddr, tpaddr);
1705 *adjusted_insn_addr_end = buildaddr;
1706
1707 /* Finally, write a jump back to the program. */
f4647387
YQ
1708
1709 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1710 if (loffset > INT_MAX || loffset < INT_MIN)
1711 {
1712 sprintf (err,
1713 "E.Jump back from jump pad too far from tracepoint "
1714 "(offset 0x%" PRIx64 " > int32).", loffset);
1715 return 1;
1716 }
1717
1718 offset = (int) loffset;
fa593d66
PA
1719 memcpy (buf, jump_insn, sizeof (jump_insn));
1720 memcpy (buf + 1, &offset, 4);
1721 append_insns (&buildaddr, sizeof (jump_insn), buf);
1722
1723 /* The jump pad is now built. Wire in a jump to our jump pad. This
1724 is always done last (by our caller actually), so that we can
1725 install fast tracepoints with threads running. This relies on
1726 the agent's atomic write support. */
f4647387
YQ
1727 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1728 if (loffset > INT_MAX || loffset < INT_MIN)
1729 {
1730 sprintf (err,
1731 "E.Jump pad too far from tracepoint "
1732 "(offset 0x%" PRIx64 " > int32).", loffset);
1733 return 1;
1734 }
1735
1736 offset = (int) loffset;
1737
fa593d66
PA
1738 memcpy (buf, jump_insn, sizeof (jump_insn));
1739 memcpy (buf + 1, &offset, 4);
1740 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1741 *jjump_pad_insn_size = sizeof (jump_insn);
1742
1743 /* Return the end address of our pad. */
1744 *jump_entry = buildaddr;
1745
1746 return 0;
1747}
1748
1749#endif /* __x86_64__ */
1750
1751/* Build a jump pad that saves registers and calls a collection
1752 function. Writes a jump instruction to the jump pad to
1753 JJUMPAD_INSN. The caller is responsible to write it in at the
1754 tracepoint address. */
1755
1756static int
1757i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1758 CORE_ADDR collector,
1759 CORE_ADDR lockaddr,
1760 ULONGEST orig_size,
1761 CORE_ADDR *jump_entry,
405f8e94
SS
1762 CORE_ADDR *trampoline,
1763 ULONGEST *trampoline_size,
fa593d66
PA
1764 unsigned char *jjump_pad_insn,
1765 ULONGEST *jjump_pad_insn_size,
1766 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1767 CORE_ADDR *adjusted_insn_addr_end,
1768 char *err)
fa593d66
PA
1769{
1770 unsigned char buf[0x100];
1771 int i, offset;
1772 CORE_ADDR buildaddr = *jump_entry;
1773
1774 /* Build the jump pad. */
1775
1776 /* First, do tracepoint data collection. Save registers. */
1777 i = 0;
1778 buf[i++] = 0x60; /* pushad */
1779 buf[i++] = 0x68; /* push tpaddr aka $pc */
1780 *((int *)(buf + i)) = (int) tpaddr;
1781 i += 4;
1782 buf[i++] = 0x9c; /* pushf */
1783 buf[i++] = 0x1e; /* push %ds */
1784 buf[i++] = 0x06; /* push %es */
1785 buf[i++] = 0x0f; /* push %fs */
1786 buf[i++] = 0xa0;
1787 buf[i++] = 0x0f; /* push %gs */
1788 buf[i++] = 0xa8;
1789 buf[i++] = 0x16; /* push %ss */
1790 buf[i++] = 0x0e; /* push %cs */
1791 append_insns (&buildaddr, i, buf);
1792
1793 /* Stack space for the collecting_t object. */
1794 i = 0;
1795 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1796
1797 /* Build the object. */
1798 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1799 memcpy (buf + i, &tpoint, 4);
1800 i += 4;
1801 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1802
1803 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1804 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1805 append_insns (&buildaddr, i, buf);
1806
1807 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1808 If we cared for it, this could be using xchg alternatively. */
1809
1810 i = 0;
1811 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1812 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1813 %esp,<lockaddr> */
1814 memcpy (&buf[i], (void *) &lockaddr, 4);
1815 i += 4;
1816 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1817 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1818 append_insns (&buildaddr, i, buf);
1819
1820
1821 /* Set up arguments to the gdb_collect call. */
1822 i = 0;
1823 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1824 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1825 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1826 append_insns (&buildaddr, i, buf);
1827
1828 i = 0;
1829 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1830 append_insns (&buildaddr, i, buf);
1831
1832 i = 0;
1833 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1834 memcpy (&buf[i], (void *) &tpoint, 4);
1835 i += 4;
1836 append_insns (&buildaddr, i, buf);
1837
1838 buf[0] = 0xe8; /* call <reladdr> */
1839 offset = collector - (buildaddr + sizeof (jump_insn));
1840 memcpy (buf + 1, &offset, 4);
1841 append_insns (&buildaddr, 5, buf);
1842 /* Clean up after the call. */
1843 buf[0] = 0x83; /* add $0x8,%esp */
1844 buf[1] = 0xc4;
1845 buf[2] = 0x08;
1846 append_insns (&buildaddr, 3, buf);
1847
1848
1849 /* Clear the spin-lock. This would need the LOCK prefix on older
1850 broken archs. */
1851 i = 0;
1852 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1853 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1854 memcpy (buf + i, &lockaddr, 4);
1855 i += 4;
1856 append_insns (&buildaddr, i, buf);
1857
1858
1859 /* Remove stack that had been used for the collect_t object. */
1860 i = 0;
1861 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1862 append_insns (&buildaddr, i, buf);
1863
1864 i = 0;
1865 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1866 buf[i++] = 0xc4;
1867 buf[i++] = 0x04;
1868 buf[i++] = 0x17; /* pop %ss */
1869 buf[i++] = 0x0f; /* pop %gs */
1870 buf[i++] = 0xa9;
1871 buf[i++] = 0x0f; /* pop %fs */
1872 buf[i++] = 0xa1;
1873 buf[i++] = 0x07; /* pop %es */
405f8e94 1874 buf[i++] = 0x1f; /* pop %ds */
fa593d66
PA
1875 buf[i++] = 0x9d; /* popf */
1876 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1877 buf[i++] = 0xc4;
1878 buf[i++] = 0x04;
1879 buf[i++] = 0x61; /* popad */
1880 append_insns (&buildaddr, i, buf);
1881
1882 /* Now, adjust the original instruction to execute in the jump
1883 pad. */
1884 *adjusted_insn_addr = buildaddr;
1885 relocate_instruction (&buildaddr, tpaddr);
1886 *adjusted_insn_addr_end = buildaddr;
1887
1888 /* Write the jump back to the program. */
1889 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1890 memcpy (buf, jump_insn, sizeof (jump_insn));
1891 memcpy (buf + 1, &offset, 4);
1892 append_insns (&buildaddr, sizeof (jump_insn), buf);
1893
1894 /* The jump pad is now built. Wire in a jump to our jump pad. This
1895 is always done last (by our caller actually), so that we can
1896 install fast tracepoints with threads running. This relies on
1897 the agent's atomic write support. */
405f8e94
SS
1898 if (orig_size == 4)
1899 {
1900 /* Create a trampoline. */
1901 *trampoline_size = sizeof (jump_insn);
1902 if (!claim_trampoline_space (*trampoline_size, trampoline))
1903 {
1904 /* No trampoline space available. */
1905 strcpy (err,
1906 "E.Cannot allocate trampoline space needed for fast "
1907 "tracepoints on 4-byte instructions.");
1908 return 1;
1909 }
1910
1911 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1912 memcpy (buf, jump_insn, sizeof (jump_insn));
1913 memcpy (buf + 1, &offset, 4);
1914 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1915
1916 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1917 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1918 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1919 memcpy (buf + 2, &offset, 2);
1920 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1921 *jjump_pad_insn_size = sizeof (small_jump_insn);
1922 }
1923 else
1924 {
1925 /* Else use a 32-bit relative jump instruction. */
1926 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1927 memcpy (buf, jump_insn, sizeof (jump_insn));
1928 memcpy (buf + 1, &offset, 4);
1929 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1930 *jjump_pad_insn_size = sizeof (jump_insn);
1931 }
fa593d66
PA
1932
1933 /* Return the end address of our pad. */
1934 *jump_entry = buildaddr;
1935
1936 return 0;
1937}
1938
/* Dispatch jump-pad construction to the amd64 or i386 builder,
   depending on the current target description.  All arguments are
   forwarded unchanged; see the builders above for their meaning.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1976
1977/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1978 architectures. */
1979
1980static int
1981x86_get_min_fast_tracepoint_insn_len (void)
1982{
1983 static int warned_about_fast_tracepoints = 0;
1984
1985#ifdef __x86_64__
1986 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1987 used for fast tracepoints. */
3aee8918 1988 if (is_64bit_tdesc ())
405f8e94
SS
1989 return 5;
1990#endif
1991
58b4daa5 1992 if (agent_loaded_p ())
405f8e94
SS
1993 {
1994 char errbuf[IPA_BUFSIZ];
1995
1996 errbuf[0] = '\0';
1997
1998 /* On x86, if trampolines are available, then 4-byte jump instructions
1999 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2000 with a 4-byte offset are used instead. */
2001 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2002 return 4;
2003 else
2004 {
2005 /* GDB has no channel to explain to user why a shorter fast
2006 tracepoint is not possible, but at least make GDBserver
2007 mention that something has gone awry. */
2008 if (!warned_about_fast_tracepoints)
2009 {
2010 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2011 warned_about_fast_tracepoints = 1;
2012 }
2013 return 5;
2014 }
2015 }
2016 else
2017 {
2018 /* Indicate that the minimum length is currently unknown since the IPA
2019 has not loaded yet. */
2020 return 0;
2021 }
fa593d66
PA
2022}
2023
6a271cae
PA
2024static void
2025add_insns (unsigned char *start, int len)
2026{
2027 CORE_ADDR buildaddr = current_insn_ptr;
2028
2029 if (debug_threads)
87ce2a04
DE
2030 debug_printf ("Adding %d bytes of insn at %s\n",
2031 len, paddress (buildaddr));
6a271cae
PA
2032
2033 append_insns (&buildaddr, len, start);
2034 current_insn_ptr = buildaddr;
2035}
2036
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the machine code between the start_NAME/end_NAME labels of the
   embedded asm fragment into the buffer at current_insn_ptr.  The
   leading "jmp end_NAME" keeps the fragment from executing in-line in
   this process.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Like EMIT_ASM, but assemble the fragment as 32-bit code (for a
   32-bit inferior) even though gdbserver itself is 64-bit.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* In a 32-bit build the 32-bit variant is just EMIT_ASM itself.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2074
#ifdef __x86_64__

/* Emit the bytecode prologue: set up a frame, reserve 0x20 bytes of
   scratch space, and save the two incoming argument registers
   (%rdi, %rsi) in the frame for later use.  */
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the bytecode epilogue: store the %rax result through the
   pointer that was saved from %rsi, return zero, and tear down the
   frame.  */
static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
2099
/* Emit code to add the top of stack into %rax and pop it.  */
static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit code replacing %rax with (top-of-stack - %rax), popping the
   stack.  */
static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiplication is not implemented; signal failure via emit_error.  */
static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; signal failure via emit_error.  */
static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented; signal failure via
   emit_error.  */
static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented; signal failure via
   emit_error.  */
static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2139
/* Emit code sign-extending the low ARG bits of %rax to 64 bits.
   Only 8, 16 and 32 are supported; anything else sets emit_error.  */
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit code setting %rax to 1 if it was zero, else 0 (logical NOT).  */
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit code AND-ing the top of stack into %rax and popping it.  */
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit code OR-ing the top of stack into %rax and popping it.  */
static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit code XOR-ing the top of stack into %rax and popping it.  */
static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit code complementing %rax (bitwise NOT, via XOR with all-ones).  */
static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
2204
2205static void
2206amd64_emit_equal (void)
2207{
2208 EMIT_ASM (amd64_equal,
2209 "cmp %rax,(%rsp)\n\t"
2210 "je .Lamd64_equal_true\n\t"
2211 "xor %rax,%rax\n\t"
2212 "jmp .Lamd64_equal_end\n\t"
2213 ".Lamd64_equal_true:\n\t"
2214 "mov $0x1,%rax\n\t"
2215 ".Lamd64_equal_end:\n\t"
2216 "lea 0x8(%rsp),%rsp");
2217}
2218
2219static void
2220amd64_emit_less_signed (void)
2221{
2222 EMIT_ASM (amd64_less_signed,
2223 "cmp %rax,(%rsp)\n\t"
2224 "jl .Lamd64_less_signed_true\n\t"
2225 "xor %rax,%rax\n\t"
2226 "jmp .Lamd64_less_signed_end\n\t"
2227 ".Lamd64_less_signed_true:\n\t"
2228 "mov $1,%rax\n\t"
2229 ".Lamd64_less_signed_end:\n\t"
2230 "lea 0x8(%rsp),%rsp");
2231}
2232
/* Emit code for an unsigned less-than of the two top stack entries:
   %rax = ((%rsp) < %rax), unsigned; the second operand is popped.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2246
/* Emit code to dereference the pointer in %rax as a SIZE-byte load,
   replacing %rax with the loaded value.  Narrow loads leave the upper
   bits of %rax unchanged; callers are expected to mask via zero/sign
   extension operations.  Unsupported sizes emit nothing.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2270
/* Emit code to pop the top of stack and branch if it was nonzero.
   The jne is emitted as raw bytes so its 32-bit displacement sits at
   a known position: *OFFSET_P bytes into the sequence, *SIZE_P bytes
   wide, to be patched later by amd64_write_goto_address.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
2284
/* Emit an unconditional jmp with a placeholder 32-bit displacement
   (at byte *OFFSET_P, *SIZE_P bytes wide) to be patched later.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2295
2296static void
2297amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2298{
2299 int diff = (to - (from + size));
2300 unsigned char buf[sizeof (int)];
2301
2302 if (size != 4)
2303 {
2304 emit_error = 1;
2305 return;
2306 }
2307
2308 memcpy (buf, &diff, sizeof (int));
2309 write_inferior_memory (from, buf, sizeof (int));
2310}
2311
/* Emit code to load the 64-bit constant NUM into %rax (the top of
   stack), using a movabs instruction.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2326
2327static void
2328amd64_emit_call (CORE_ADDR fn)
2329{
2330 unsigned char buf[16];
2331 int i;
2332 CORE_ADDR buildaddr;
4e29fb54 2333 LONGEST offset64;
6a271cae
PA
2334
2335 /* The destination function being in the shared library, may be
2336 >31-bits away off the compiled code pad. */
2337
2338 buildaddr = current_insn_ptr;
2339
2340 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2341
2342 i = 0;
2343
2344 if (offset64 > INT_MAX || offset64 < INT_MIN)
2345 {
2346 /* Offset is too large for a call. Use callq, but that requires
2347 a register, so avoid it if possible. Use r10, since it is
2348 call-clobbered, we don't have to push/pop it. */
2349 buf[i++] = 0x48; /* mov $fn,%r10 */
2350 buf[i++] = 0xba;
2351 memcpy (buf + i, &fn, 8);
2352 i += 8;
2353 buf[i++] = 0xff; /* callq *%r10 */
2354 buf[i++] = 0xd2;
2355 }
2356 else
2357 {
2358 int offset32 = offset64; /* we know we can't overflow here. */
2359 memcpy (buf + i, &offset32, 4);
2360 i += 4;
2361 }
2362
2363 append_insns (&buildaddr, i, buf);
2364 current_insn_ptr = buildaddr;
2365}
2366
/* Emit code to fetch raw register REG into the top of stack, by
   loading REG into %esi (second argument) and calling the
   get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2384
/* Emit code to discard the cached top of stack and replace it with
   the next value from the memory stack.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
2391
/* Emit code to push the cached top of stack (%rax) onto the memory
   stack, making room for a new top value.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
2398
/* Emit code to zero-extend the low ARG bits of the top of stack,
   clearing all higher bits of %rax.  Unsupported widths set
   emit_error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* An and-immediate would sign-extend a 32-bit mask, so load the
	 mask into %rcx first.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
2421
/* Emit code to exchange the two top stack entries (%rax and the
   value on top of the memory stack).  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
2430
/* Emit code to drop N 8-byte entries from the memory stack using a
   single lea with an 8-bit displacement.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2448
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   the single integer argument ARG1 (loaded into %edi); FN's return
   value becomes the new top of stack in %rax.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
2467
4e29fb54 2468/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2469
2470static void
2471amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2472{
2473 unsigned char buf[16];
2474 int i;
2475 CORE_ADDR buildaddr;
2476
2477 buildaddr = current_insn_ptr;
2478 i = 0;
2479 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2480 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2481 i += 4;
2482 append_insns (&buildaddr, i, buf);
2483 current_insn_ptr = buildaddr;
2484 EMIT_ASM (amd64_void_call_2_a,
2485 /* Save away a copy of the stack top. */
2486 "push %rax\n\t"
2487 /* Also pass top as the second argument. */
2488 "mov %rax,%rsi");
2489 amd64_emit_call (fn);
2490 EMIT_ASM (amd64_void_call_2_b,
2491 /* Restore the stack top, %rax may have been trashed. */
2492 "pop %rax");
2493}
2494
6b9801d4
SS
/* Emit a compare-and-branch: jump if the two top stack entries are
   equal, otherwise fall through; both operands are popped on either
   path.  *OFFSET_P/*SIZE_P locate the patchable 32-bit jump
   displacement within the emitted sequence.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2514
/* Emit a compare-and-branch: jump if the two top stack entries are
   not equal; both operands are popped on either path.  *OFFSET_P and
   *SIZE_P locate the patchable 32-bit jump displacement.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2534
/* Emit a compare-and-branch: jump if next-to-top < top (signed);
   both operands are popped on either path.  *OFFSET_P/*SIZE_P locate
   the patchable 32-bit jump displacement.  */

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2554
/* Emit a compare-and-branch: jump if next-to-top <= top (signed);
   both operands are popped on either path.  *OFFSET_P/*SIZE_P locate
   the patchable 32-bit jump displacement.  */

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2574
/* Emit a compare-and-branch: jump if next-to-top > top (signed);
   both operands are popped on either path.  *OFFSET_P/*SIZE_P locate
   the patchable 32-bit jump displacement.  */

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2594
/* Emit a compare-and-branch: jump if next-to-top >= top (signed);
   both operands are popped on either path.  *OFFSET_P/*SIZE_P locate
   the patchable 32-bit jump displacement.  */

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2615
6a271cae
PA
/* Vector of bytecode-compilation callbacks for 64-bit inferiors,
   returned by x86_emit_ops when the tdesc is 64-bit.
   NOTE(review): positional initializer; entry order must match the
   declaration of struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2656
2657#endif /* __x86_64__ */
2658
/* Emit the i386 bytecode prologue: set up a frame and preserve %ebx,
   which is used as the high half of the 64-bit top-of-stack pair.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2669
/* Emit the i386 bytecode epilogue: store the 64-bit result
   (%ebx:%eax) through the value pointer at 12(%ebp), return 0, and
   restore the saved registers.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2682
/* Emit 64-bit addition of the two top stack entries.  The top is
   cached in %ebx:%eax; the second operand (popped) lives in two
   32-bit slots on the memory stack, added with add/adc.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2691
/* Emit 64-bit subtraction: next-to-top minus top, via sub/sbb on the
   in-memory operand, then pop the result into %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2701
/* 64-bit multiply is not implemented for i386; flag an error so the
   bytecode falls back to interpretation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2707
/* 64-bit left shift is not implemented for i386; flag an error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2713
/* 64-bit arithmetic right shift is not implemented for i386; flag an
   error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2719
/* 64-bit logical right shift is not implemented for i386; flag an
   error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2725
/* Emit code to sign-extend the low ARG bits of the top of stack to
   the full 64-bit %ebx:%eax pair (%ebx becomes the sign fill).
   Unsupported widths set emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2753
/* Emit code to logically negate the 64-bit top of stack:
   %ebx:%eax = (%ebx:%eax == 0) ? 1 : 0.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2764
/* Emit 64-bit bitwise-AND of the two top stack entries; the second
   operand is popped.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2773
/* Emit 64-bit bitwise-OR of the two top stack entries; the second
   operand is popped.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2782
/* Emit 64-bit bitwise-XOR of the two top stack entries; the second
   operand is popped.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2791
/* Emit 64-bit bitwise complement of the top of stack by inverting
   both halves of the %ebx:%eax pair.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2799
/* Emit 64-bit equality of the two top stack entries: both halves
   must match.  Pops the second operand and leaves 1/0 in
   %ebx:%eax.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2817
/* Emit 64-bit signed less-than of the two top stack entries: compare
   high halves first, then low halves (unsigned) as tie-breaker.
   Pops the second operand and leaves 1/0 in %ebx:%eax.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2836
/* Emit 64-bit unsigned less-than of the two top stack entries:
   compare high halves first, then low halves.  Pops the second
   operand and leaves 1/0 in %ebx:%eax.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2855
/* Emit code to dereference the pointer in %eax as a SIZE-byte load
   into the top of stack; an 8-byte load fills the full %ebx:%eax
   pair.  Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2880
/* Emit code to pop the 64-bit top of stack and branch if it was
   nonzero (either half set).  The jne is emitted as raw bytes so the
   32-bit displacement sits at byte *OFFSET_P, *SIZE_P bytes wide,
   for later patching by i386_write_goto_address.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2898
/* Emit an unconditional jmp with a placeholder 32-bit displacement
   (at byte *OFFSET_P, *SIZE_P bytes wide) to be patched later.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2910
2911static void
2912i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2913{
2914 int diff = (to - (from + size));
2915 unsigned char buf[sizeof (int)];
2916
2917 /* We're only doing 4-byte sizes at the moment. */
2918 if (size != 4)
2919 {
2920 emit_error = 1;
2921 return;
2922 }
2923
2924 memcpy (buf, &diff, sizeof (int));
2925 write_inferior_memory (from, buf, sizeof (int));
2926}
2927
2928static void
4e29fb54 2929i386_emit_const (LONGEST num)
6a271cae
PA
2930{
2931 unsigned char buf[16];
b00ad6ff 2932 int i, hi, lo;
6a271cae
PA
2933 CORE_ADDR buildaddr = current_insn_ptr;
2934
2935 i = 0;
2936 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2937 lo = num & 0xffffffff;
2938 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2939 i += 4;
2940 hi = ((num >> 32) & 0xffffffff);
2941 if (hi)
2942 {
2943 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2944 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2945 i += 4;
2946 }
2947 else
2948 {
2949 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2950 }
2951 append_insns (&buildaddr, i, buf);
2952 current_insn_ptr = buildaddr;
2953}
2954
2955static void
2956i386_emit_call (CORE_ADDR fn)
2957{
2958 unsigned char buf[16];
2959 int i, offset;
2960 CORE_ADDR buildaddr;
2961
2962 buildaddr = current_insn_ptr;
2963 i = 0;
2964 buf[i++] = 0xe8; /* call <reladdr> */
2965 offset = ((int) fn) - (buildaddr + 5);
2966 memcpy (buf + 1, &offset, 4);
2967 append_insns (&buildaddr, 5, buf);
2968 current_insn_ptr = buildaddr;
2969}
2970
2971static void
2972i386_emit_reg (int reg)
2973{
2974 unsigned char buf[16];
2975 int i;
2976 CORE_ADDR buildaddr;
2977
2978 EMIT_ASM32 (i386_reg_a,
2979 "sub $0x8,%esp");
2980 buildaddr = current_insn_ptr;
2981 i = 0;
2982 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff 2983 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2984 i += 4;
2985 append_insns (&buildaddr, i, buf);
2986 current_insn_ptr = buildaddr;
2987 EMIT_ASM32 (i386_reg_b,
2988 "mov %eax,4(%esp)\n\t"
2989 "mov 8(%ebp),%eax\n\t"
2990 "mov %eax,(%esp)");
2991 i386_emit_call (get_raw_reg_func_addr ());
2992 EMIT_ASM32 (i386_reg_c,
2993 "xor %ebx,%ebx\n\t"
2994 "lea 0x8(%esp),%esp");
2995}
2996
2997static void
2998i386_emit_pop (void)
2999{
3000 EMIT_ASM32 (i386_pop,
3001 "pop %eax\n\t"
3002 "pop %ebx");
3003}
3004
3005static void
3006i386_emit_stack_flush (void)
3007{
3008 EMIT_ASM32 (i386_stack_flush,
3009 "push %ebx\n\t"
3010 "push %eax");
3011}
3012
3013static void
3014i386_emit_zero_ext (int arg)
3015{
3016 switch (arg)
3017 {
3018 case 8:
3019 EMIT_ASM32 (i386_zero_ext_8,
3020 "and $0xff,%eax\n\t"
3021 "xor %ebx,%ebx");
3022 break;
3023 case 16:
3024 EMIT_ASM32 (i386_zero_ext_16,
3025 "and $0xffff,%eax\n\t"
3026 "xor %ebx,%ebx");
3027 break;
3028 case 32:
3029 EMIT_ASM32 (i386_zero_ext_32,
3030 "xor %ebx,%ebx");
3031 break;
3032 default:
3033 emit_error = 1;
3034 }
3035}
3036
3037static void
3038i386_emit_swap (void)
3039{
3040 EMIT_ASM32 (i386_swap,
3041 "mov %eax,%ecx\n\t"
3042 "mov %ebx,%edx\n\t"
3043 "pop %eax\n\t"
3044 "pop %ebx\n\t"
3045 "push %edx\n\t"
3046 "push %ecx");
3047}
3048
3049static void
3050i386_emit_stack_adjust (int n)
3051{
3052 unsigned char buf[16];
3053 int i;
3054 CORE_ADDR buildaddr = current_insn_ptr;
3055
3056 i = 0;
3057 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3058 buf[i++] = 0x64;
3059 buf[i++] = 0x24;
3060 buf[i++] = n * 8;
3061 append_insns (&buildaddr, i, buf);
3062 current_insn_ptr = buildaddr;
3063}
3064
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   the single integer argument ARG1 passed on the stack; the 64-bit
   return value (%edx:%eax) becomes the new top of stack in
   %ebx:%eax.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3092
4e29fb54 3093/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
3094
3095static void
3096i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3097{
3098 unsigned char buf[16];
3099 int i;
3100 CORE_ADDR buildaddr;
3101
3102 EMIT_ASM32 (i386_void_call_2_a,
3103 /* Preserve %eax only; we don't have to worry about %ebx. */
3104 "push %eax\n\t"
3105 /* Reserve a bit of stack space for arguments. */
3106 "sub $0x10,%esp\n\t"
3107 /* Copy "top" to the second argument position. (Note that
3108 we can't assume function won't scribble on its
3109 arguments, so don't try to restore from this.) */
3110 "mov %eax,4(%esp)\n\t"
3111 "mov %ebx,8(%esp)");
3112 /* Put the first argument on the stack. */
3113 buildaddr = current_insn_ptr;
3114 i = 0;
3115 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3116 buf[i++] = 0x04;
3117 buf[i++] = 0x24;
b00ad6ff 3118 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3119 i += 4;
3120 append_insns (&buildaddr, i, buf);
3121 current_insn_ptr = buildaddr;
3122 i386_emit_call (fn);
3123 EMIT_ASM32 (i386_void_call_2_b,
3124 "lea 0x10(%esp),%esp\n\t"
3125 /* Restore original stack top. */
3126 "pop %eax");
3127}
3128
6b9801d4
SS
3129
3130void
3131i386_emit_eq_goto (int *offset_p, int *size_p)
3132{
3133 EMIT_ASM32 (eq,
3134 /* Check low half first, more likely to be decider */
3135 "cmpl %eax,(%esp)\n\t"
3136 "jne .Leq_fallthru\n\t"
3137 "cmpl %ebx,4(%esp)\n\t"
3138 "jne .Leq_fallthru\n\t"
3139 "lea 0x8(%esp),%esp\n\t"
3140 "pop %eax\n\t"
3141 "pop %ebx\n\t"
3142 /* jmp, but don't trust the assembler to choose the right jump */
3143 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3144 ".Leq_fallthru:\n\t"
3145 "lea 0x8(%esp),%esp\n\t"
3146 "pop %eax\n\t"
3147 "pop %ebx");
3148
3149 if (offset_p)
3150 *offset_p = 18;
3151 if (size_p)
3152 *size_p = 4;
3153}
3154
3155void
3156i386_emit_ne_goto (int *offset_p, int *size_p)
3157{
3158 EMIT_ASM32 (ne,
3159 /* Check low half first, more likely to be decider */
3160 "cmpl %eax,(%esp)\n\t"
3161 "jne .Lne_jump\n\t"
3162 "cmpl %ebx,4(%esp)\n\t"
3163 "je .Lne_fallthru\n\t"
3164 ".Lne_jump:\n\t"
3165 "lea 0x8(%esp),%esp\n\t"
3166 "pop %eax\n\t"
3167 "pop %ebx\n\t"
3168 /* jmp, but don't trust the assembler to choose the right jump */
3169 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3170 ".Lne_fallthru:\n\t"
3171 "lea 0x8(%esp),%esp\n\t"
3172 "pop %eax\n\t"
3173 "pop %ebx");
3174
3175 if (offset_p)
3176 *offset_p = 18;
3177 if (size_p)
3178 *size_p = 4;
3179}
3180
3181void
3182i386_emit_lt_goto (int *offset_p, int *size_p)
3183{
3184 EMIT_ASM32 (lt,
3185 "cmpl %ebx,4(%esp)\n\t"
3186 "jl .Llt_jump\n\t"
3187 "jne .Llt_fallthru\n\t"
3188 "cmpl %eax,(%esp)\n\t"
3189 "jnl .Llt_fallthru\n\t"
3190 ".Llt_jump:\n\t"
3191 "lea 0x8(%esp),%esp\n\t"
3192 "pop %eax\n\t"
3193 "pop %ebx\n\t"
3194 /* jmp, but don't trust the assembler to choose the right jump */
3195 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3196 ".Llt_fallthru:\n\t"
3197 "lea 0x8(%esp),%esp\n\t"
3198 "pop %eax\n\t"
3199 "pop %ebx");
3200
3201 if (offset_p)
3202 *offset_p = 20;
3203 if (size_p)
3204 *size_p = 4;
3205}
3206
3207void
3208i386_emit_le_goto (int *offset_p, int *size_p)
3209{
3210 EMIT_ASM32 (le,
3211 "cmpl %ebx,4(%esp)\n\t"
3212 "jle .Lle_jump\n\t"
3213 "jne .Lle_fallthru\n\t"
3214 "cmpl %eax,(%esp)\n\t"
3215 "jnle .Lle_fallthru\n\t"
3216 ".Lle_jump:\n\t"
3217 "lea 0x8(%esp),%esp\n\t"
3218 "pop %eax\n\t"
3219 "pop %ebx\n\t"
3220 /* jmp, but don't trust the assembler to choose the right jump */
3221 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3222 ".Lle_fallthru:\n\t"
3223 "lea 0x8(%esp),%esp\n\t"
3224 "pop %eax\n\t"
3225 "pop %ebx");
3226
3227 if (offset_p)
3228 *offset_p = 20;
3229 if (size_p)
3230 *size_p = 4;
3231}
3232
3233void
3234i386_emit_gt_goto (int *offset_p, int *size_p)
3235{
3236 EMIT_ASM32 (gt,
3237 "cmpl %ebx,4(%esp)\n\t"
3238 "jg .Lgt_jump\n\t"
3239 "jne .Lgt_fallthru\n\t"
3240 "cmpl %eax,(%esp)\n\t"
3241 "jng .Lgt_fallthru\n\t"
3242 ".Lgt_jump:\n\t"
3243 "lea 0x8(%esp),%esp\n\t"
3244 "pop %eax\n\t"
3245 "pop %ebx\n\t"
3246 /* jmp, but don't trust the assembler to choose the right jump */
3247 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3248 ".Lgt_fallthru:\n\t"
3249 "lea 0x8(%esp),%esp\n\t"
3250 "pop %eax\n\t"
3251 "pop %ebx");
3252
3253 if (offset_p)
3254 *offset_p = 20;
3255 if (size_p)
3256 *size_p = 4;
3257}
3258
3259void
3260i386_emit_ge_goto (int *offset_p, int *size_p)
3261{
3262 EMIT_ASM32 (ge,
3263 "cmpl %ebx,4(%esp)\n\t"
3264 "jge .Lge_jump\n\t"
3265 "jne .Lge_fallthru\n\t"
3266 "cmpl %eax,(%esp)\n\t"
3267 "jnge .Lge_fallthru\n\t"
3268 ".Lge_jump:\n\t"
3269 "lea 0x8(%esp),%esp\n\t"
3270 "pop %eax\n\t"
3271 "pop %ebx\n\t"
3272 /* jmp, but don't trust the assembler to choose the right jump */
3273 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3274 ".Lge_fallthru:\n\t"
3275 "lea 0x8(%esp),%esp\n\t"
3276 "pop %eax\n\t"
3277 "pop %ebx");
3278
3279 if (offset_p)
3280 *offset_p = 20;
3281 if (size_p)
3282 *size_p = 4;
3283}
3284
6a271cae
PA
/* Vector of bytecode-compilation callbacks for 32-bit inferiors,
   returned by x86_emit_ops when the tdesc is 32-bit.
   NOTE(review): positional initializer; entry order must match the
   declaration of struct emit_ops.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3325
3326
3327static struct emit_ops *
3328x86_emit_ops (void)
3329{
3330#ifdef __x86_64__
3aee8918 3331 if (is_64bit_tdesc ())
6a271cae
PA
3332 return &amd64_emit_ops;
3333 else
3334#endif
3335 return &i386_emit_ops;
3336}
3337
c2d6af84
PA
/* The x86 target always supports range stepping.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3343
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.
   NOTE(review): positional initializer; entry order must match the
   declaration of struct linux_target_ops in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
3aee8918
PA
3383
/* Arch entry point: register all Linux x86 target descriptions and
   the regset information.  Called once at gdbserver startup.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();

  /* Fallback description for inferiors that don't support XML target
     descriptions: a copy of the plain amd64 tdesc with a canned
     xmltarget string.  */
  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_mpx_linux ();

  /* Same fallback for 32-bit inferiors.  */
  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.758809 seconds and 4 git commands to generate.