gdbserver: perror_with_name: Add ATTRIBUTE_NORETURN.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
ecd75fc8 3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e 28#include "i386-xstate.h"
d0722149
DE
29
30#include "gdb_proc_service.h"
b5737fa9
PA
31/* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
33#ifndef ELFMAG0
34#include "elf/common.h"
35#endif
36
58b4daa5 37#include "agent.h"
3aee8918 38#include "tdesc.h"
c144c7a0 39#include "tracepoint.h"
f699aaba 40#include "ax.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
a196ebeb
WT
51/* Defined in auto-generated file amd64-mpx-linux.c. */
52void init_registers_amd64_mpx_linux (void);
53extern const struct target_desc *tdesc_amd64_mpx_linux;
54
4d47af5c
L
55/* Defined in auto-generated file x32-linux.c. */
56void init_registers_x32_linux (void);
3aee8918
PA
57extern const struct target_desc *tdesc_x32_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-avx-linux.c. */
60void init_registers_x32_avx_linux (void);
3aee8918 61extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 62
3aee8918
PA
63#endif
64
65/* Defined in auto-generated file i386-linux.c. */
66void init_registers_i386_linux (void);
67extern const struct target_desc *tdesc_i386_linux;
68
69/* Defined in auto-generated file i386-mmx-linux.c. */
70void init_registers_i386_mmx_linux (void);
71extern const struct target_desc *tdesc_i386_mmx_linux;
72
73/* Defined in auto-generated file i386-avx-linux.c. */
74void init_registers_i386_avx_linux (void);
75extern const struct target_desc *tdesc_i386_avx_linux;
76
a196ebeb
WT
77/* Defined in auto-generated file i386-mpx-linux.c. */
78void init_registers_i386_mpx_linux (void);
79extern const struct target_desc *tdesc_i386_mpx_linux;
80
3aee8918
PA
81#ifdef __x86_64__
82static struct target_desc *tdesc_amd64_linux_no_xml;
83#endif
84static struct target_desc *tdesc_i386_linux_no_xml;
85
1570b33e 86
fa593d66 87static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 88static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 89
1570b33e
L
90/* Backward compatibility for gdb without XML support. */
91
92static const char *xmltarget_i386_linux_no_xml = "@<target>\
93<architecture>i386</architecture>\
94<osabi>GNU/Linux</osabi>\
95</target>";
f6d1620c
L
96
97#ifdef __x86_64__
1570b33e
L
98static const char *xmltarget_amd64_linux_no_xml = "@<target>\
99<architecture>i386:x86-64</architecture>\
100<osabi>GNU/Linux</osabi>\
101</target>";
f6d1620c 102#endif
d0722149
DE
103
104#include <sys/reg.h>
105#include <sys/procfs.h>
106#include <sys/ptrace.h>
1570b33e
L
107#include <sys/uio.h>
108
109#ifndef PTRACE_GETREGSET
110#define PTRACE_GETREGSET 0x4204
111#endif
112
113#ifndef PTRACE_SETREGSET
114#define PTRACE_SETREGSET 0x4205
115#endif
116
d0722149
DE
117
118#ifndef PTRACE_GET_THREAD_AREA
119#define PTRACE_GET_THREAD_AREA 25
120#endif
121
122/* This definition comes from prctl.h, but some kernels may not have it. */
123#ifndef PTRACE_ARCH_PRCTL
124#define PTRACE_ARCH_PRCTL 30
125#endif
126
127/* The following definitions come from prctl.h, but may be absent
128 for certain configurations. */
129#ifndef ARCH_GET_FS
130#define ARCH_SET_GS 0x1001
131#define ARCH_SET_FS 0x1002
132#define ARCH_GET_FS 0x1003
133#define ARCH_GET_GS 0x1004
134#endif
135
aa5ca48f
DE
136/* Per-process arch-specific data we want to keep. */
137
138struct arch_process_info
139{
140 struct i386_debug_reg_state debug_reg_state;
141};
142
143/* Per-thread arch-specific data we want to keep. */
144
145struct arch_lwp_info
146{
147 /* Non-zero if our copy differs from what's recorded in the thread. */
148 int debug_registers_changed;
149};
150
d0722149
DE
151#ifdef __x86_64__
152
153/* Mapping between the general-purpose registers in `struct user'
154 format and GDB's register array layout.
155 Note that the transfer layout uses 64-bit regs. */
156static /*const*/ int i386_regmap[] =
157{
158 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
159 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
160 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
161 DS * 8, ES * 8, FS * 8, GS * 8
162};
163
164#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
165
166/* So code below doesn't have to care, i386 or amd64. */
167#define ORIG_EAX ORIG_RAX
168
169static const int x86_64_regmap[] =
170{
171 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
172 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
173 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
174 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
175 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
176 DS * 8, ES * 8, FS * 8, GS * 8,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
180 -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
182 ORIG_RAX * 8,
183 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
184 -1, -1 /* MPX registers BNDCFGU, BNDSTATUS. */
d0722149
DE
185};
186
187#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
188
189#else /* ! __x86_64__ */
190
191/* Mapping between the general-purpose registers in `struct user'
192 format and GDB's register array layout. */
193static /*const*/ int i386_regmap[] =
194{
195 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
196 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
197 EIP * 4, EFL * 4, CS * 4, SS * 4,
198 DS * 4, ES * 4, FS * 4, GS * 4
199};
200
201#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
202
203#endif
3aee8918
PA
204
205#ifdef __x86_64__
206
207/* Returns true if the current inferior belongs to a x86-64 process,
208 per the tdesc. */
209
210static int
211is_64bit_tdesc (void)
212{
213 struct regcache *regcache = get_thread_regcache (current_inferior, 0);
214
215 return register_size (regcache->tdesc, 0) == 8;
216}
217
218#endif
219
d0722149
DE
220\f
221/* Called by libthread_db. */
222
223ps_err_e
224ps_get_thread_area (const struct ps_prochandle *ph,
225 lwpid_t lwpid, int idx, void **base)
226{
227#ifdef __x86_64__
3aee8918 228 int use_64bit = is_64bit_tdesc ();
d0722149
DE
229
230 if (use_64bit)
231 {
232 switch (idx)
233 {
234 case FS:
235 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
236 return PS_OK;
237 break;
238 case GS:
239 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
240 return PS_OK;
241 break;
242 default:
243 return PS_BADADDR;
244 }
245 return PS_ERR;
246 }
247#endif
248
249 {
250 unsigned int desc[4];
251
252 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
253 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
254 return PS_ERR;
255
d1ec4ce7
DE
256 /* Ensure we properly extend the value to 64-bits for x86_64. */
257 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
258 return PS_OK;
259 }
260}
fa593d66
PA
261
262/* Get the thread area address. This is used to recognize which
263 thread is which when tracing with the in-process agent library. We
264 don't read anything from the address, and treat it as opaque; it's
265 the address itself that we assume is unique per-thread. */
266
267static int
268x86_get_thread_area (int lwpid, CORE_ADDR *addr)
269{
270#ifdef __x86_64__
3aee8918 271 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
272
273 if (use_64bit)
274 {
275 void *base;
276 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
277 {
278 *addr = (CORE_ADDR) (uintptr_t) base;
279 return 0;
280 }
281
282 return -1;
283 }
284#endif
285
286 {
287 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
d86d4aaf
DE
288 struct thread_info *thr = get_lwp_thread (lwp);
289 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
290 unsigned int desc[4];
291 ULONGEST gs = 0;
292 const int reg_thread_area = 3; /* bits to scale down register value. */
293 int idx;
294
295 collect_register_by_name (regcache, "gs", &gs);
296
297 idx = gs >> reg_thread_area;
298
299 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 300 lwpid_of (thr),
493e2a69 301 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
302 return -1;
303
304 *addr = desc[1];
305 return 0;
306 }
307}
308
309
d0722149
DE
310\f
311static int
3aee8918 312x86_cannot_store_register (int regno)
d0722149 313{
3aee8918
PA
314#ifdef __x86_64__
315 if (is_64bit_tdesc ())
316 return 0;
317#endif
318
d0722149
DE
319 return regno >= I386_NUM_REGS;
320}
321
322static int
3aee8918 323x86_cannot_fetch_register (int regno)
d0722149 324{
3aee8918
PA
325#ifdef __x86_64__
326 if (is_64bit_tdesc ())
327 return 0;
328#endif
329
d0722149
DE
330 return regno >= I386_NUM_REGS;
331}
332
333static void
442ea881 334x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
335{
336 int i;
337
338#ifdef __x86_64__
3aee8918 339 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
340 {
341 for (i = 0; i < X86_64_NUM_REGS; i++)
342 if (x86_64_regmap[i] != -1)
442ea881 343 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
344 return;
345 }
346#endif
347
348 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 349 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 350
442ea881
PA
351 collect_register_by_name (regcache, "orig_eax",
352 ((char *) buf) + ORIG_EAX * 4);
d0722149
DE
353}
354
355static void
442ea881 356x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
357{
358 int i;
359
360#ifdef __x86_64__
3aee8918 361 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
362 {
363 for (i = 0; i < X86_64_NUM_REGS; i++)
364 if (x86_64_regmap[i] != -1)
442ea881 365 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
366 return;
367 }
368#endif
369
370 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 371 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 372
442ea881
PA
373 supply_register_by_name (regcache, "orig_eax",
374 ((char *) buf) + ORIG_EAX * 4);
d0722149
DE
375}
376
/* Fill the ptrace floating-point register buffer BUF from REGCACHE.
   64-bit uses the FXSAVE layout, 32-bit the older FSAVE layout.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

/* Store the ptrace floating-point register buffer BUF into REGCACHE;
   inverse of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
396
#ifndef __x86_64__

/* Fill the ptrace FPX (FXSAVE-layout) buffer BUF from REGCACHE.
   Only needed for 32-bit, where FP and FPX regsets are distinct.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Store the ptrace FPX (FXSAVE-layout) buffer BUF into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
412
1570b33e
L
/* Fill the ptrace XSAVE extended-state buffer BUF from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Store the ptrace XSAVE extended-state buffer BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
424
d0722149
DE
425/* ??? The non-biarch i386 case stores all the i387 regs twice.
426 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
427 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
428 doesn't work. IWBN to avoid the duplication in the case where it
429 does work. Maybe the arch_setup routine could check whether it works
3aee8918 430 and update the supported regsets accordingly. */
d0722149 431
3aee8918 432static struct regset_info x86_regsets[] =
d0722149
DE
433{
434#ifdef HAVE_PTRACE_GETREGS
1570b33e 435 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
436 GENERAL_REGS,
437 x86_fill_gregset, x86_store_gregset },
1570b33e
L
438 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
439 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
440# ifndef __x86_64__
441# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 442 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
443 EXTENDED_REGS,
444 x86_fill_fpxregset, x86_store_fpxregset },
445# endif
446# endif
1570b33e 447 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
448 FP_REGS,
449 x86_fill_fpregset, x86_store_fpregset },
450#endif /* HAVE_PTRACE_GETREGS */
1570b33e 451 { 0, 0, 0, -1, -1, NULL, NULL }
d0722149
DE
452};
453
454static CORE_ADDR
442ea881 455x86_get_pc (struct regcache *regcache)
d0722149 456{
3aee8918 457 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
458
459 if (use_64bit)
460 {
461 unsigned long pc;
442ea881 462 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
463 return (CORE_ADDR) pc;
464 }
465 else
466 {
467 unsigned int pc;
442ea881 468 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
469 return (CORE_ADDR) pc;
470 }
471}
472
473static void
442ea881 474x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 475{
3aee8918 476 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
477
478 if (use_64bit)
479 {
480 unsigned long newpc = pc;
442ea881 481 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
482 }
483 else
484 {
485 unsigned int newpc = pc;
442ea881 486 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
487 }
488}
489\f
490static const unsigned char x86_breakpoint[] = { 0xCC };
491#define x86_breakpoint_len 1
492
493static int
494x86_breakpoint_at (CORE_ADDR pc)
495{
496 unsigned char c;
497
fc7238bb 498 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
499 if (c == 0xCC)
500 return 1;
501
502 return 0;
503}
504\f
aa5ca48f
DE
505/* Support for debug registers. */
506
507static unsigned long
508x86_linux_dr_get (ptid_t ptid, int regnum)
509{
510 int tid;
511 unsigned long value;
512
513 tid = ptid_get_lwp (ptid);
514
515 errno = 0;
516 value = ptrace (PTRACE_PEEKUSER, tid,
517 offsetof (struct user, u_debugreg[regnum]), 0);
518 if (errno != 0)
519 error ("Couldn't read debug register");
520
521 return value;
522}
523
524static void
525x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
526{
527 int tid;
528
529 tid = ptid_get_lwp (ptid);
530
531 errno = 0;
532 ptrace (PTRACE_POKEUSER, tid,
533 offsetof (struct user, u_debugreg[regnum]), value);
534 if (errno != 0)
535 error ("Couldn't write debug register");
536}
537
964e4306
PA
538static int
539update_debug_registers_callback (struct inferior_list_entry *entry,
540 void *pid_p)
541{
d86d4aaf
DE
542 struct thread_info *thr = (struct thread_info *) entry;
543 struct lwp_info *lwp = get_thread_lwp (thr);
964e4306
PA
544 int pid = *(int *) pid_p;
545
546 /* Only update the threads of this process. */
d86d4aaf 547 if (pid_of (thr) == pid)
964e4306
PA
548 {
549 /* The actual update is done later just before resuming the lwp,
550 we just mark that the registers need updating. */
551 lwp->arch_private->debug_registers_changed = 1;
552
553 /* If the lwp isn't stopped, force it to momentarily pause, so
554 we can update its debug registers. */
555 if (!lwp->stopped)
556 linux_stop_lwp (lwp);
557 }
558
559 return 0;
560}
561
aa5ca48f
DE
562/* Update the inferior's debug register REGNUM from STATE. */
563
564void
565i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
566{
964e4306 567 /* Only update the threads of this process. */
d86d4aaf 568 int pid = pid_of (current_inferior);
aa5ca48f
DE
569
570 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
571 fatal ("Invalid debug register %d", regnum);
572
d86d4aaf 573 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 574}
aa5ca48f 575
964e4306 576/* Return the inferior's debug register REGNUM. */
aa5ca48f 577
964e4306
PA
578CORE_ADDR
579i386_dr_low_get_addr (int regnum)
580{
d86d4aaf 581 ptid_t ptid = ptid_of (current_inferior);
964e4306
PA
582
583 /* DR6 and DR7 are retrieved with some other way. */
0a5b1e09 584 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
585
586 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
587}
588
589/* Update the inferior's DR7 debug control register from STATE. */
590
591void
592i386_dr_low_set_control (const struct i386_debug_reg_state *state)
593{
964e4306 594 /* Only update the threads of this process. */
d86d4aaf 595 int pid = pid_of (current_inferior);
aa5ca48f 596
d86d4aaf 597 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 598}
aa5ca48f 599
964e4306
PA
600/* Return the inferior's DR7 debug control register. */
601
602unsigned
603i386_dr_low_get_control (void)
604{
d86d4aaf 605 ptid_t ptid = ptid_of (current_inferior);
964e4306
PA
606
607 return x86_linux_dr_get (ptid, DR_CONTROL);
aa5ca48f
DE
608}
609
610/* Get the value of the DR6 debug status register from the inferior
611 and record it in STATE. */
612
964e4306
PA
613unsigned
614i386_dr_low_get_status (void)
aa5ca48f 615{
d86d4aaf 616 ptid_t ptid = ptid_of (current_inferior);
aa5ca48f 617
964e4306 618 return x86_linux_dr_get (ptid, DR_STATUS);
aa5ca48f
DE
619}
620\f
90d74c30 621/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
622
623static int
624x86_insert_point (char type, CORE_ADDR addr, int len)
625{
626 struct process_info *proc = current_process ();
627 switch (type)
628 {
961bd387 629 case '0': /* software-breakpoint */
90d74c30
PA
630 {
631 int ret;
632
633 ret = prepare_to_access_memory ();
634 if (ret)
635 return -1;
636 ret = set_gdb_breakpoint_at (addr);
0146f85b 637 done_accessing_memory ();
90d74c30
PA
638 return ret;
639 }
961bd387
ME
640 case '1': /* hardware-breakpoint */
641 case '2': /* write watchpoint */
642 case '3': /* read watchpoint */
643 case '4': /* access watchpoint */
aa5ca48f
DE
644 return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
645 type, addr, len);
961bd387 646
aa5ca48f
DE
647 default:
648 /* Unsupported. */
649 return 1;
650 }
651}
652
653static int
654x86_remove_point (char type, CORE_ADDR addr, int len)
655{
656 struct process_info *proc = current_process ();
657 switch (type)
658 {
961bd387 659 case '0': /* software-breakpoint */
90d74c30
PA
660 {
661 int ret;
662
663 ret = prepare_to_access_memory ();
664 if (ret)
665 return -1;
666 ret = delete_gdb_breakpoint_at (addr);
0146f85b 667 done_accessing_memory ();
90d74c30
PA
668 return ret;
669 }
961bd387
ME
670 case '1': /* hardware-breakpoint */
671 case '2': /* write watchpoint */
672 case '3': /* read watchpoint */
673 case '4': /* access watchpoint */
aa5ca48f
DE
674 return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
675 type, addr, len);
676 default:
677 /* Unsupported. */
678 return 1;
679 }
680}
681
682static int
683x86_stopped_by_watchpoint (void)
684{
685 struct process_info *proc = current_process ();
686 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
687}
688
689static CORE_ADDR
690x86_stopped_data_address (void)
691{
692 struct process_info *proc = current_process ();
693 CORE_ADDR addr;
694 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
695 &addr))
696 return addr;
697 return 0;
698}
699\f
700/* Called when a new process is created. */
701
702static struct arch_process_info *
703x86_linux_new_process (void)
704{
705 struct arch_process_info *info = xcalloc (1, sizeof (*info));
706
707 i386_low_init_dregs (&info->debug_reg_state);
708
709 return info;
710}
711
712/* Called when a new thread is detected. */
713
714static struct arch_lwp_info *
715x86_linux_new_thread (void)
716{
717 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
718
719 info->debug_registers_changed = 1;
720
721 return info;
722}
723
724/* Called when resuming a thread.
725 If the debug regs have changed, update the thread's copies. */
726
727static void
728x86_linux_prepare_to_resume (struct lwp_info *lwp)
729{
d86d4aaf 730 ptid_t ptid = ptid_of (get_lwp_thread (lwp));
6210a125 731 int clear_status = 0;
b9a881c2 732
aa5ca48f
DE
733 if (lwp->arch_private->debug_registers_changed)
734 {
735 int i;
aa5ca48f
DE
736 int pid = ptid_get_pid (ptid);
737 struct process_info *proc = find_process_pid (pid);
493e2a69
MS
738 struct i386_debug_reg_state *state
739 = &proc->private->arch_private->debug_reg_state;
aa5ca48f
DE
740
741 for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
6210a125
PA
742 if (state->dr_ref_count[i] > 0)
743 {
744 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
745
746 /* If we're setting a watchpoint, any change the inferior
747 had done itself to the debug registers needs to be
748 discarded, otherwise, i386_low_stopped_data_address can
749 get confused. */
750 clear_status = 1;
751 }
aa5ca48f
DE
752
753 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
754
755 lwp->arch_private->debug_registers_changed = 0;
756 }
b9a881c2 757
6210a125 758 if (clear_status || lwp->stopped_by_watchpoint)
b9a881c2 759 x86_linux_dr_set (ptid, DR_STATUS, 0);
aa5ca48f
DE
760}
761\f
d0722149
DE
762/* When GDBSERVER is built as a 64-bit application on linux, the
763 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
764 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
765 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
766 conversion in-place ourselves. */
767
768/* These types below (compat_*) define a siginfo type that is layout
769 compatible with the siginfo type exported by the 32-bit userspace
770 support. */
771
772#ifdef __x86_64__
773
774typedef int compat_int_t;
775typedef unsigned int compat_uptr_t;
776
777typedef int compat_time_t;
778typedef int compat_timer_t;
779typedef int compat_clock_t;
780
781struct compat_timeval
782{
783 compat_time_t tv_sec;
784 int tv_usec;
785};
786
787typedef union compat_sigval
788{
789 compat_int_t sival_int;
790 compat_uptr_t sival_ptr;
791} compat_sigval_t;
792
793typedef struct compat_siginfo
794{
795 int si_signo;
796 int si_errno;
797 int si_code;
798
799 union
800 {
801 int _pad[((128 / sizeof (int)) - 3)];
802
803 /* kill() */
804 struct
805 {
806 unsigned int _pid;
807 unsigned int _uid;
808 } _kill;
809
810 /* POSIX.1b timers */
811 struct
812 {
813 compat_timer_t _tid;
814 int _overrun;
815 compat_sigval_t _sigval;
816 } _timer;
817
818 /* POSIX.1b signals */
819 struct
820 {
821 unsigned int _pid;
822 unsigned int _uid;
823 compat_sigval_t _sigval;
824 } _rt;
825
826 /* SIGCHLD */
827 struct
828 {
829 unsigned int _pid;
830 unsigned int _uid;
831 int _status;
832 compat_clock_t _utime;
833 compat_clock_t _stime;
834 } _sigchld;
835
836 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
837 struct
838 {
839 unsigned int _addr;
840 } _sigfault;
841
842 /* SIGPOLL */
843 struct
844 {
845 int _band;
846 int _fd;
847 } _sigpoll;
848 } _sifields;
849} compat_siginfo_t;
850
c92b5177
L
851/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
852typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
853
854typedef struct compat_x32_siginfo
855{
856 int si_signo;
857 int si_errno;
858 int si_code;
859
860 union
861 {
862 int _pad[((128 / sizeof (int)) - 3)];
863
864 /* kill() */
865 struct
866 {
867 unsigned int _pid;
868 unsigned int _uid;
869 } _kill;
870
871 /* POSIX.1b timers */
872 struct
873 {
874 compat_timer_t _tid;
875 int _overrun;
876 compat_sigval_t _sigval;
877 } _timer;
878
879 /* POSIX.1b signals */
880 struct
881 {
882 unsigned int _pid;
883 unsigned int _uid;
884 compat_sigval_t _sigval;
885 } _rt;
886
887 /* SIGCHLD */
888 struct
889 {
890 unsigned int _pid;
891 unsigned int _uid;
892 int _status;
893 compat_x32_clock_t _utime;
894 compat_x32_clock_t _stime;
895 } _sigchld;
896
897 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
898 struct
899 {
900 unsigned int _addr;
901 } _sigfault;
902
903 /* SIGPOLL */
904 struct
905 {
906 int _band;
907 int _fd;
908 } _sigpoll;
909 } _sifields;
910} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
911
d0722149
DE
912#define cpt_si_pid _sifields._kill._pid
913#define cpt_si_uid _sifields._kill._uid
914#define cpt_si_timerid _sifields._timer._tid
915#define cpt_si_overrun _sifields._timer._overrun
916#define cpt_si_status _sifields._sigchld._status
917#define cpt_si_utime _sifields._sigchld._utime
918#define cpt_si_stime _sifields._sigchld._stime
919#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
920#define cpt_si_addr _sifields._sigfault._addr
921#define cpt_si_band _sifields._sigpoll._band
922#define cpt_si_fd _sifields._sigpoll._fd
923
924/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
925 In their place is si_timer1,si_timer2. */
926#ifndef si_timerid
927#define si_timerid si_timer1
928#endif
929#ifndef si_overrun
930#define si_overrun si_timer2
931#endif
932
933static void
934compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
935{
936 memset (to, 0, sizeof (*to));
937
938 to->si_signo = from->si_signo;
939 to->si_errno = from->si_errno;
940 to->si_code = from->si_code;
941
b53a1623 942 if (to->si_code == SI_TIMER)
d0722149 943 {
b53a1623
PA
944 to->cpt_si_timerid = from->si_timerid;
945 to->cpt_si_overrun = from->si_overrun;
d0722149
DE
946 to->cpt_si_ptr = (intptr_t) from->si_ptr;
947 }
948 else if (to->si_code == SI_USER)
949 {
950 to->cpt_si_pid = from->si_pid;
951 to->cpt_si_uid = from->si_uid;
952 }
b53a1623 953 else if (to->si_code < 0)
d0722149 954 {
b53a1623
PA
955 to->cpt_si_pid = from->si_pid;
956 to->cpt_si_uid = from->si_uid;
d0722149
DE
957 to->cpt_si_ptr = (intptr_t) from->si_ptr;
958 }
959 else
960 {
961 switch (to->si_signo)
962 {
963 case SIGCHLD:
964 to->cpt_si_pid = from->si_pid;
965 to->cpt_si_uid = from->si_uid;
966 to->cpt_si_status = from->si_status;
967 to->cpt_si_utime = from->si_utime;
968 to->cpt_si_stime = from->si_stime;
969 break;
970 case SIGILL:
971 case SIGFPE:
972 case SIGSEGV:
973 case SIGBUS:
974 to->cpt_si_addr = (intptr_t) from->si_addr;
975 break;
976 case SIGPOLL:
977 to->cpt_si_band = from->si_band;
978 to->cpt_si_fd = from->si_fd;
979 break;
980 default:
981 to->cpt_si_pid = from->si_pid;
982 to->cpt_si_uid = from->si_uid;
983 to->cpt_si_ptr = (intptr_t) from->si_ptr;
984 break;
985 }
986 }
987}
988
989static void
990siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
991{
992 memset (to, 0, sizeof (*to));
993
994 to->si_signo = from->si_signo;
995 to->si_errno = from->si_errno;
996 to->si_code = from->si_code;
997
b53a1623 998 if (to->si_code == SI_TIMER)
d0722149 999 {
b53a1623
PA
1000 to->si_timerid = from->cpt_si_timerid;
1001 to->si_overrun = from->cpt_si_overrun;
d0722149
DE
1002 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1003 }
1004 else if (to->si_code == SI_USER)
1005 {
1006 to->si_pid = from->cpt_si_pid;
1007 to->si_uid = from->cpt_si_uid;
1008 }
b53a1623 1009 else if (to->si_code < 0)
d0722149 1010 {
b53a1623
PA
1011 to->si_pid = from->cpt_si_pid;
1012 to->si_uid = from->cpt_si_uid;
d0722149
DE
1013 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1014 }
1015 else
1016 {
1017 switch (to->si_signo)
1018 {
1019 case SIGCHLD:
1020 to->si_pid = from->cpt_si_pid;
1021 to->si_uid = from->cpt_si_uid;
1022 to->si_status = from->cpt_si_status;
1023 to->si_utime = from->cpt_si_utime;
1024 to->si_stime = from->cpt_si_stime;
1025 break;
1026 case SIGILL:
1027 case SIGFPE:
1028 case SIGSEGV:
1029 case SIGBUS:
1030 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1031 break;
1032 case SIGPOLL:
1033 to->si_band = from->cpt_si_band;
1034 to->si_fd = from->cpt_si_fd;
1035 break;
1036 default:
1037 to->si_pid = from->cpt_si_pid;
1038 to->si_uid = from->cpt_si_uid;
1039 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1040 break;
1041 }
1042 }
1043}
1044
c92b5177
L
1045static void
1046compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1047 siginfo_t *from)
1048{
1049 memset (to, 0, sizeof (*to));
1050
1051 to->si_signo = from->si_signo;
1052 to->si_errno = from->si_errno;
1053 to->si_code = from->si_code;
1054
1055 if (to->si_code == SI_TIMER)
1056 {
1057 to->cpt_si_timerid = from->si_timerid;
1058 to->cpt_si_overrun = from->si_overrun;
1059 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1060 }
1061 else if (to->si_code == SI_USER)
1062 {
1063 to->cpt_si_pid = from->si_pid;
1064 to->cpt_si_uid = from->si_uid;
1065 }
1066 else if (to->si_code < 0)
1067 {
1068 to->cpt_si_pid = from->si_pid;
1069 to->cpt_si_uid = from->si_uid;
1070 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1071 }
1072 else
1073 {
1074 switch (to->si_signo)
1075 {
1076 case SIGCHLD:
1077 to->cpt_si_pid = from->si_pid;
1078 to->cpt_si_uid = from->si_uid;
1079 to->cpt_si_status = from->si_status;
1080 to->cpt_si_utime = from->si_utime;
1081 to->cpt_si_stime = from->si_stime;
1082 break;
1083 case SIGILL:
1084 case SIGFPE:
1085 case SIGSEGV:
1086 case SIGBUS:
1087 to->cpt_si_addr = (intptr_t) from->si_addr;
1088 break;
1089 case SIGPOLL:
1090 to->cpt_si_band = from->si_band;
1091 to->cpt_si_fd = from->si_fd;
1092 break;
1093 default:
1094 to->cpt_si_pid = from->si_pid;
1095 to->cpt_si_uid = from->si_uid;
1096 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1097 break;
1098 }
1099 }
1100}
1101
1102static void
1103siginfo_from_compat_x32_siginfo (siginfo_t *to,
1104 compat_x32_siginfo_t *from)
1105{
1106 memset (to, 0, sizeof (*to));
1107
1108 to->si_signo = from->si_signo;
1109 to->si_errno = from->si_errno;
1110 to->si_code = from->si_code;
1111
1112 if (to->si_code == SI_TIMER)
1113 {
1114 to->si_timerid = from->cpt_si_timerid;
1115 to->si_overrun = from->cpt_si_overrun;
1116 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1117 }
1118 else if (to->si_code == SI_USER)
1119 {
1120 to->si_pid = from->cpt_si_pid;
1121 to->si_uid = from->cpt_si_uid;
1122 }
1123 else if (to->si_code < 0)
1124 {
1125 to->si_pid = from->cpt_si_pid;
1126 to->si_uid = from->cpt_si_uid;
1127 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1128 }
1129 else
1130 {
1131 switch (to->si_signo)
1132 {
1133 case SIGCHLD:
1134 to->si_pid = from->cpt_si_pid;
1135 to->si_uid = from->cpt_si_uid;
1136 to->si_status = from->cpt_si_status;
1137 to->si_utime = from->cpt_si_utime;
1138 to->si_stime = from->cpt_si_stime;
1139 break;
1140 case SIGILL:
1141 case SIGFPE:
1142 case SIGSEGV:
1143 case SIGBUS:
1144 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1145 break;
1146 case SIGPOLL:
1147 to->si_band = from->cpt_si_band;
1148 to->si_fd = from->cpt_si_fd;
1149 break;
1150 default:
1151 to->si_pid = from->cpt_si_pid;
1152 to->si_uid = from->cpt_si_uid;
1153 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1154 break;
1155 }
1156 }
1157}
1158
d0722149
DE
1159#endif /* __x86_64__ */
1160
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_inferior);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      /* The compat struct must be layout-compatible in size, or the
	 memberwise copies below cannot be trusted.  */
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* x32 inferior under a 64-bit gdbserver: use the x32 compat
	 layout instead.  */
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  /* Same layout on both sides; no conversion needed.  */
  return 0;
}
1207\f
1570b33e
L
/* Nonzero if the connected GDB announced x86 XML target description
   support via the qSupported "xmlRegisters=" feature; set in
   x86_linux_process_qsupported.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet"; probed in x86_linux_read_description.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means "not
   probed yet"; probed in x86_linux_read_description.  */
static int have_ptrace_getregset = -1;
1244
/* Get Linux/x86 target description from running target.

   Probes, and caches in file-level statics, whether the host supports
   PTRACE_GETFPXREGS and PTRACE_GETREGSET, reads XCR0 from the XSAVE
   extended state, and returns the matching pre-built target
   description (amd64/x32/i386, with AVX/MPX variants).  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* Static: XCR0 is only read once, the first time PTRACE_GETREGSET
     is probed; later calls reuse the cached value.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_inferior);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      /* 32-bit gdbserver cannot debug a 64-bit inferior.  */
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      /* Probe: pre-SSE kernels/CPUs fail this request with EIO.  */
      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Resize the
	     XSTATE regset to the size implied by XCR0 and disable
	     the now-redundant non-general regsets.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = I386_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & I386_XSTATE_ALL_MASK)
		{
		case I386_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case I386_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  /* x32 inferior (64-bit machine, 32-bit ELF).  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & I386_XSTATE_ALL_MASK)
		{
		case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case I386_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & I386_XSTATE_ALL_MASK)
	    {
	    case (I386_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (I386_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1400
1401/* Callback for find_inferior. Stops iteration when a thread with a
1402 given PID is found. */
1403
1404static int
1405same_process_callback (struct inferior_list_entry *entry, void *data)
1406{
1407 int pid = *(int *) data;
1408
1409 return (ptid_get_pid (entry->id) == pid);
1410}
1411
1412/* Callback for for_each_inferior. Calls the arch_setup routine for
1413 each process. */
1414
1415static void
1416x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1417{
1418 int pid = ptid_get_pid (entry->id);
1419
1420 /* Look up any thread of this processes. */
1421 current_inferior
1422 = (struct thread_info *) find_inferior (&all_threads,
1423 same_process_callback, &pid);
1424
1425 the_low_target.arch_setup ();
1426}
1427
1428/* Update all the target description of all processes; a new GDB
1429 connected, and it may or not support xml target descriptions. */
1430
1431static void
1432x86_linux_update_xmltarget (void)
1433{
1434 struct thread_info *save_inferior = current_inferior;
1435
1436 /* Before changing the register cache's internal layout, flush the
1437 contents of the current valid caches back to the threads, and
1438 release the current regcache objects. */
1439 regcache_release ();
1440
1441 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1442
1443 current_inferior = save_inferior;
1570b33e
L
1444}
1445
1446/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1447 PTRACE_GETREGSET. */
1448
1449static void
1450x86_linux_process_qsupported (const char *query)
1451{
1452 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1453 with "i386" in qSupported query, it supports x86 XML target
1454 descriptions. */
1455 use_xml = 0;
1456 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1457 {
1458 char *copy = xstrdup (query + 13);
1459 char *p;
1460
1461 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1462 {
1463 if (strcmp (p, "i386") == 0)
1464 {
1465 use_xml = 1;
1466 break;
1467 }
1468 }
1469
1470 free (copy);
1471 }
1472
1473 x86_linux_update_xmltarget ();
1474}
1475
3aee8918 1476/* Common for x86/x86-64. */
d0722149 1477
3aee8918
PA
1478static struct regsets_info x86_regsets_info =
1479 {
1480 x86_regsets, /* regsets */
1481 0, /* num_regsets */
1482 NULL, /* disabled_regsets */
1483 };
214d508e
L
1484
1485#ifdef __x86_64__
3aee8918
PA
1486static struct regs_info amd64_linux_regs_info =
1487 {
1488 NULL, /* regset_bitmap */
1489 NULL, /* usrregs_info */
1490 &x86_regsets_info
1491 };
d0722149 1492#endif
3aee8918
PA
1493static struct usrregs_info i386_linux_usrregs_info =
1494 {
1495 I386_NUM_REGS,
1496 i386_regmap,
1497 };
d0722149 1498
3aee8918
PA
1499static struct regs_info i386_linux_regs_info =
1500 {
1501 NULL, /* regset_bitmap */
1502 &i386_linux_usrregs_info,
1503 &x86_regsets_info
1504 };
d0722149 1505
3aee8918
PA
/* Return the register access description matching the current
   inferior: the amd64 regs_info for a 64-bit tdesc, the i386 one
   otherwise.  */

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
d0722149 1516
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  Reads (and possibly probes) the description from the
   running target; see x86_linux_read_description.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1525
219f2f23
PA
/* The x86/x86-64 low target always supports tracepoints.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1531
fa593d66
PA
1532static void
1533append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1534{
1535 write_inferior_memory (*to, buf, len);
1536 *to += len;
1537}
1538
/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 89 04 24", into BUF.  Returns the number of bytes stored.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *cursor = op;

  for (;;)
    {
      char *after;
      unsigned long byte = strtoul (cursor, &after, 16);

      /* strtoul consumed nothing: end of the byte list.  */
      if (after == cursor)
	break;

      *out++ = byte;
      cursor = after;
    }

  return out - buf;
}
1558
1559#ifdef __x86_64__
1560
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   Returns 0 on success; returns 1 and puts an "E." error string in
   ERR when a required displacement does not fit in 32 bits.  The pad
   layout: save all GPRs + flags, build a collecting_t on the stack,
   take the spin-lock at LOCKADDR, call COLLECTOR, unlock, restore,
   run the relocated original instruction, jump back.

   NOTE(review): jump_insn is defined elsewhere in this file; it
   appears to be a 5-byte relative jmp template (opcode + 4-byte
   displacement patched at buf + 1) — confirm at its definition.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1747
1748#endif /* __x86_64__ */
1749
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   32-bit flavor of amd64_install_fast_tracepoint_jump_pad.  Returns 0
   on success, or 1 with an "E." message in ERR when trampoline space
   for a 4-byte instruction cannot be claimed.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1937
/* Dispatch jump-pad installation to the amd64 or i386 builder
   depending on the current inferior's tdesc.  See the per-arch
   builders for parameter/return semantics.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1975
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  Returns 5 (or 4 when a trampoline buffer is
   available on 32-bit), or 0 when the in-process agent has not loaded
   yet and the answer is unknown.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  /* Static: warn about missing 4-byte support only once per
     gdbserver run.  */
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
2022
6a271cae
PA
2023static void
2024add_insns (unsigned char *start, int len)
2025{
2026 CORE_ADDR buildaddr = current_insn_ptr;
2027
2028 if (debug_threads)
87ce2a04
DE
2029 debug_printf ("Adding %d bytes of insn at %s\n",
2030 len, paddress (buildaddr));
6a271cae
PA
2031
2032 append_insns (&buildaddr, len, start);
2033 current_insn_ptr = buildaddr;
2034}
2035
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the inline-asm template INSNS (bracketed by the start_NAME and
   end_NAME labels) into the bytecode stream via add_insns.  The
   leading jmp keeps the host from executing the template in place.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* As EMIT_ASM, but assemble INSNS as 32-bit code inside a 64-bit
   build, using .code32/.code64 to switch modes around the template.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* On a 32-bit build EMIT_ASM already emits 32-bit code.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2073
2074#ifdef __x86_64__
2075
/* Bytecode compiler backend for amd64: each amd64_emit_* appends the
   machine code for one agent-expression operation.  Top of stack is
   kept in %rax; the rest lives on the machine stack.  Operations
   without a compact encoding set emit_error instead.  */

static void
amd64_emit_prologue (void)
{
  /* Frame setup; stashes the two incoming pointer args at -8/-16(%rbp).  */
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  /* Store the result through the saved second arg, return 0.  */
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* mul/shift operations have no emitter yet; flag an error so the
   expression falls back to interpretation.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend the low ARG bits of %rax to the full register.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Comparison emitters: pop one operand, compare against %rax, leave
   the 0/1 result in %rax.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Dereference %rax as a SIZE-byte pointer, leaving the value loaded
   in %rax (low bytes for sizes < 8).  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Emit a conditional branch on top-of-stack.  The jne displacement is
   left zero here; *OFFSET_P/*SIZE_P tell the caller where the 4-byte
   displacement sits so amd64_write_goto_address can patch it.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Unconditional branch, displacement patched later like if_goto.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch a previously-emitted branch at FROM to target TO; SIZE is the
   displacement width recorded by the emitter (only 4 supported).  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

/* Load the 64-bit constant NUM into %rax via movabs.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2325
2326static void
2327amd64_emit_call (CORE_ADDR fn)
2328{
2329 unsigned char buf[16];
2330 int i;
2331 CORE_ADDR buildaddr;
4e29fb54 2332 LONGEST offset64;
6a271cae
PA
2333
2334 /* The destination function being in the shared library, may be
2335 >31-bits away off the compiled code pad. */
2336
2337 buildaddr = current_insn_ptr;
2338
2339 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2340
2341 i = 0;
2342
2343 if (offset64 > INT_MAX || offset64 < INT_MIN)
2344 {
2345 /* Offset is too large for a call. Use callq, but that requires
2346 a register, so avoid it if possible. Use r10, since it is
2347 call-clobbered, we don't have to push/pop it. */
2348 buf[i++] = 0x48; /* mov $fn,%r10 */
2349 buf[i++] = 0xba;
2350 memcpy (buf + i, &fn, 8);
2351 i += 8;
2352 buf[i++] = 0xff; /* callq *%r10 */
2353 buf[i++] = 0xd2;
2354 }
2355 else
2356 {
2357 int offset32 = offset64; /* we know we can't overflow here. */
2358 memcpy (buf + i, &offset32, 4);
2359 i += 4;
2360 }
2361
2362 append_insns (&buildaddr, i, buf);
2363 current_insn_ptr = buildaddr;
2364}
2365
2366static void
2367amd64_emit_reg (int reg)
2368{
2369 unsigned char buf[16];
2370 int i;
2371 CORE_ADDR buildaddr;
2372
2373 /* Assume raw_regs is still in %rdi. */
2374 buildaddr = current_insn_ptr;
2375 i = 0;
2376 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 2377 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2378 i += 4;
2379 append_insns (&buildaddr, i, buf);
2380 current_insn_ptr = buildaddr;
2381 amd64_emit_call (get_raw_reg_func_addr ());
2382}
2383
2384static void
2385amd64_emit_pop (void)
2386{
2387 EMIT_ASM (amd64_pop,
2388 "pop %rax");
2389}
2390
2391static void
2392amd64_emit_stack_flush (void)
2393{
2394 EMIT_ASM (amd64_stack_flush,
2395 "push %rax");
2396}
2397
2398static void
2399amd64_emit_zero_ext (int arg)
2400{
2401 switch (arg)
2402 {
2403 case 8:
2404 EMIT_ASM (amd64_zero_ext_8,
2405 "and $0xff,%rax");
2406 break;
2407 case 16:
2408 EMIT_ASM (amd64_zero_ext_16,
2409 "and $0xffff,%rax");
2410 break;
2411 case 32:
2412 EMIT_ASM (amd64_zero_ext_32,
2413 "mov $0xffffffff,%rcx\n\t"
2414 "and %rcx,%rax");
2415 break;
2416 default:
2417 emit_error = 1;
2418 }
2419}
2420
2421static void
2422amd64_emit_swap (void)
2423{
2424 EMIT_ASM (amd64_swap,
2425 "mov %rax,%rcx\n\t"
2426 "pop %rax\n\t"
2427 "push %rcx");
2428}
2429
2430static void
2431amd64_emit_stack_adjust (int n)
2432{
2433 unsigned char buf[16];
2434 int i;
2435 CORE_ADDR buildaddr = current_insn_ptr;
2436
2437 i = 0;
2438 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2439 buf[i++] = 0x8d;
2440 buf[i++] = 0x64;
2441 buf[i++] = 0x24;
2442 /* This only handles adjustments up to 16, but we don't expect any more. */
2443 buf[i++] = n * 8;
2444 append_insns (&buildaddr, i, buf);
2445 current_insn_ptr = buildaddr;
2446}
2447
2448/* FN's prototype is `LONGEST(*fn)(int)'. */
2449
2450static void
2451amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2452{
2453 unsigned char buf[16];
2454 int i;
2455 CORE_ADDR buildaddr;
2456
2457 buildaddr = current_insn_ptr;
2458 i = 0;
2459 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2460 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2461 i += 4;
2462 append_insns (&buildaddr, i, buf);
2463 current_insn_ptr = buildaddr;
2464 amd64_emit_call (fn);
2465}
2466
4e29fb54 2467/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2468
2469static void
2470amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2471{
2472 unsigned char buf[16];
2473 int i;
2474 CORE_ADDR buildaddr;
2475
2476 buildaddr = current_insn_ptr;
2477 i = 0;
2478 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 2479 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2480 i += 4;
2481 append_insns (&buildaddr, i, buf);
2482 current_insn_ptr = buildaddr;
2483 EMIT_ASM (amd64_void_call_2_a,
2484 /* Save away a copy of the stack top. */
2485 "push %rax\n\t"
2486 /* Also pass top as the second argument. */
2487 "mov %rax,%rsi");
2488 amd64_emit_call (fn);
2489 EMIT_ASM (amd64_void_call_2_b,
2490 /* Restore the stack top, %rax may have been trashed. */
2491 "pop %rax");
2492}
2493
6b9801d4
SS
2494void
2495amd64_emit_eq_goto (int *offset_p, int *size_p)
2496{
2497 EMIT_ASM (amd64_eq,
2498 "cmp %rax,(%rsp)\n\t"
2499 "jne .Lamd64_eq_fallthru\n\t"
2500 "lea 0x8(%rsp),%rsp\n\t"
2501 "pop %rax\n\t"
2502 /* jmp, but don't trust the assembler to choose the right jump */
2503 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2504 ".Lamd64_eq_fallthru:\n\t"
2505 "lea 0x8(%rsp),%rsp\n\t"
2506 "pop %rax");
2507
2508 if (offset_p)
2509 *offset_p = 13;
2510 if (size_p)
2511 *size_p = 4;
2512}
2513
2514void
2515amd64_emit_ne_goto (int *offset_p, int *size_p)
2516{
2517 EMIT_ASM (amd64_ne,
2518 "cmp %rax,(%rsp)\n\t"
2519 "je .Lamd64_ne_fallthru\n\t"
2520 "lea 0x8(%rsp),%rsp\n\t"
2521 "pop %rax\n\t"
2522 /* jmp, but don't trust the assembler to choose the right jump */
2523 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2524 ".Lamd64_ne_fallthru:\n\t"
2525 "lea 0x8(%rsp),%rsp\n\t"
2526 "pop %rax");
2527
2528 if (offset_p)
2529 *offset_p = 13;
2530 if (size_p)
2531 *size_p = 4;
2532}
2533
2534void
2535amd64_emit_lt_goto (int *offset_p, int *size_p)
2536{
2537 EMIT_ASM (amd64_lt,
2538 "cmp %rax,(%rsp)\n\t"
2539 "jnl .Lamd64_lt_fallthru\n\t"
2540 "lea 0x8(%rsp),%rsp\n\t"
2541 "pop %rax\n\t"
2542 /* jmp, but don't trust the assembler to choose the right jump */
2543 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2544 ".Lamd64_lt_fallthru:\n\t"
2545 "lea 0x8(%rsp),%rsp\n\t"
2546 "pop %rax");
2547
2548 if (offset_p)
2549 *offset_p = 13;
2550 if (size_p)
2551 *size_p = 4;
2552}
2553
2554void
2555amd64_emit_le_goto (int *offset_p, int *size_p)
2556{
2557 EMIT_ASM (amd64_le,
2558 "cmp %rax,(%rsp)\n\t"
2559 "jnle .Lamd64_le_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2561 "pop %rax\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_le_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2566 "pop %rax");
2567
2568 if (offset_p)
2569 *offset_p = 13;
2570 if (size_p)
2571 *size_p = 4;
2572}
2573
2574void
2575amd64_emit_gt_goto (int *offset_p, int *size_p)
2576{
2577 EMIT_ASM (amd64_gt,
2578 "cmp %rax,(%rsp)\n\t"
2579 "jng .Lamd64_gt_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2581 "pop %rax\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_gt_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax");
2587
2588 if (offset_p)
2589 *offset_p = 13;
2590 if (size_p)
2591 *size_p = 4;
2592}
2593
2594void
2595amd64_emit_ge_goto (int *offset_p, int *size_p)
2596{
2597 EMIT_ASM (amd64_ge,
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnge .Lamd64_ge_fallthru\n\t"
2600 ".Lamd64_ge_jump:\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2602 "pop %rax\n\t"
2603 /* jmp, but don't trust the assembler to choose the right jump */
2604 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2605 ".Lamd64_ge_fallthru:\n\t"
2606 "lea 0x8(%rsp),%rsp\n\t"
2607 "pop %rax");
2608
2609 if (offset_p)
2610 *offset_p = 13;
2611 if (size_p)
2612 *size_p = 4;
2613}
2614
6a271cae
PA
2615struct emit_ops amd64_emit_ops =
2616 {
2617 amd64_emit_prologue,
2618 amd64_emit_epilogue,
2619 amd64_emit_add,
2620 amd64_emit_sub,
2621 amd64_emit_mul,
2622 amd64_emit_lsh,
2623 amd64_emit_rsh_signed,
2624 amd64_emit_rsh_unsigned,
2625 amd64_emit_ext,
2626 amd64_emit_log_not,
2627 amd64_emit_bit_and,
2628 amd64_emit_bit_or,
2629 amd64_emit_bit_xor,
2630 amd64_emit_bit_not,
2631 amd64_emit_equal,
2632 amd64_emit_less_signed,
2633 amd64_emit_less_unsigned,
2634 amd64_emit_ref,
2635 amd64_emit_if_goto,
2636 amd64_emit_goto,
2637 amd64_write_goto_address,
2638 amd64_emit_const,
2639 amd64_emit_call,
2640 amd64_emit_reg,
2641 amd64_emit_pop,
2642 amd64_emit_stack_flush,
2643 amd64_emit_zero_ext,
2644 amd64_emit_swap,
2645 amd64_emit_stack_adjust,
2646 amd64_emit_int_call_1,
6b9801d4
SS
2647 amd64_emit_void_call_2,
2648 amd64_emit_eq_goto,
2649 amd64_emit_ne_goto,
2650 amd64_emit_lt_goto,
2651 amd64_emit_le_goto,
2652 amd64_emit_gt_goto,
2653 amd64_emit_ge_goto
6a271cae
PA
2654 };
2655
2656#endif /* __x86_64__ */
2657
2658static void
2659i386_emit_prologue (void)
2660{
2661 EMIT_ASM32 (i386_prologue,
2662 "push %ebp\n\t"
bf15cbda
SS
2663 "mov %esp,%ebp\n\t"
2664 "push %ebx");
6a271cae
PA
2665 /* At this point, the raw regs base address is at 8(%ebp), and the
2666 value pointer is at 12(%ebp). */
2667}
2668
2669static void
2670i386_emit_epilogue (void)
2671{
2672 EMIT_ASM32 (i386_epilogue,
2673 "mov 12(%ebp),%ecx\n\t"
2674 "mov %eax,(%ecx)\n\t"
2675 "mov %ebx,0x4(%ecx)\n\t"
2676 "xor %eax,%eax\n\t"
bf15cbda 2677 "pop %ebx\n\t"
6a271cae
PA
2678 "pop %ebp\n\t"
2679 "ret");
2680}
2681
2682static void
2683i386_emit_add (void)
2684{
2685 EMIT_ASM32 (i386_add,
2686 "add (%esp),%eax\n\t"
2687 "adc 0x4(%esp),%ebx\n\t"
2688 "lea 0x8(%esp),%esp");
2689}
2690
2691static void
2692i386_emit_sub (void)
2693{
2694 EMIT_ASM32 (i386_sub,
2695 "subl %eax,(%esp)\n\t"
2696 "sbbl %ebx,4(%esp)\n\t"
2697 "pop %eax\n\t"
2698 "pop %ebx\n\t");
2699}
2700
2701static void
2702i386_emit_mul (void)
2703{
2704 emit_error = 1;
2705}
2706
2707static void
2708i386_emit_lsh (void)
2709{
2710 emit_error = 1;
2711}
2712
2713static void
2714i386_emit_rsh_signed (void)
2715{
2716 emit_error = 1;
2717}
2718
2719static void
2720i386_emit_rsh_unsigned (void)
2721{
2722 emit_error = 1;
2723}
2724
2725static void
2726i386_emit_ext (int arg)
2727{
2728 switch (arg)
2729 {
2730 case 8:
2731 EMIT_ASM32 (i386_ext_8,
2732 "cbtw\n\t"
2733 "cwtl\n\t"
2734 "movl %eax,%ebx\n\t"
2735 "sarl $31,%ebx");
2736 break;
2737 case 16:
2738 EMIT_ASM32 (i386_ext_16,
2739 "cwtl\n\t"
2740 "movl %eax,%ebx\n\t"
2741 "sarl $31,%ebx");
2742 break;
2743 case 32:
2744 EMIT_ASM32 (i386_ext_32,
2745 "movl %eax,%ebx\n\t"
2746 "sarl $31,%ebx");
2747 break;
2748 default:
2749 emit_error = 1;
2750 }
2751}
2752
2753static void
2754i386_emit_log_not (void)
2755{
2756 EMIT_ASM32 (i386_log_not,
2757 "or %ebx,%eax\n\t"
2758 "test %eax,%eax\n\t"
2759 "sete %cl\n\t"
2760 "xor %ebx,%ebx\n\t"
2761 "movzbl %cl,%eax");
2762}
2763
2764static void
2765i386_emit_bit_and (void)
2766{
2767 EMIT_ASM32 (i386_and,
2768 "and (%esp),%eax\n\t"
2769 "and 0x4(%esp),%ebx\n\t"
2770 "lea 0x8(%esp),%esp");
2771}
2772
2773static void
2774i386_emit_bit_or (void)
2775{
2776 EMIT_ASM32 (i386_or,
2777 "or (%esp),%eax\n\t"
2778 "or 0x4(%esp),%ebx\n\t"
2779 "lea 0x8(%esp),%esp");
2780}
2781
2782static void
2783i386_emit_bit_xor (void)
2784{
2785 EMIT_ASM32 (i386_xor,
2786 "xor (%esp),%eax\n\t"
2787 "xor 0x4(%esp),%ebx\n\t"
2788 "lea 0x8(%esp),%esp");
2789}
2790
2791static void
2792i386_emit_bit_not (void)
2793{
2794 EMIT_ASM32 (i386_bit_not,
2795 "xor $0xffffffff,%eax\n\t"
2796 "xor $0xffffffff,%ebx\n\t");
2797}
2798
2799static void
2800i386_emit_equal (void)
2801{
2802 EMIT_ASM32 (i386_equal,
2803 "cmpl %ebx,4(%esp)\n\t"
2804 "jne .Li386_equal_false\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "je .Li386_equal_true\n\t"
2807 ".Li386_equal_false:\n\t"
2808 "xor %eax,%eax\n\t"
2809 "jmp .Li386_equal_end\n\t"
2810 ".Li386_equal_true:\n\t"
2811 "mov $1,%eax\n\t"
2812 ".Li386_equal_end:\n\t"
2813 "xor %ebx,%ebx\n\t"
2814 "lea 0x8(%esp),%esp");
2815}
2816
2817static void
2818i386_emit_less_signed (void)
2819{
2820 EMIT_ASM32 (i386_less_signed,
2821 "cmpl %ebx,4(%esp)\n\t"
2822 "jl .Li386_less_signed_true\n\t"
2823 "jne .Li386_less_signed_false\n\t"
2824 "cmpl %eax,(%esp)\n\t"
2825 "jl .Li386_less_signed_true\n\t"
2826 ".Li386_less_signed_false:\n\t"
2827 "xor %eax,%eax\n\t"
2828 "jmp .Li386_less_signed_end\n\t"
2829 ".Li386_less_signed_true:\n\t"
2830 "mov $1,%eax\n\t"
2831 ".Li386_less_signed_end:\n\t"
2832 "xor %ebx,%ebx\n\t"
2833 "lea 0x8(%esp),%esp");
2834}
2835
2836static void
2837i386_emit_less_unsigned (void)
2838{
2839 EMIT_ASM32 (i386_less_unsigned,
2840 "cmpl %ebx,4(%esp)\n\t"
2841 "jb .Li386_less_unsigned_true\n\t"
2842 "jne .Li386_less_unsigned_false\n\t"
2843 "cmpl %eax,(%esp)\n\t"
2844 "jb .Li386_less_unsigned_true\n\t"
2845 ".Li386_less_unsigned_false:\n\t"
2846 "xor %eax,%eax\n\t"
2847 "jmp .Li386_less_unsigned_end\n\t"
2848 ".Li386_less_unsigned_true:\n\t"
2849 "mov $1,%eax\n\t"
2850 ".Li386_less_unsigned_end:\n\t"
2851 "xor %ebx,%ebx\n\t"
2852 "lea 0x8(%esp),%esp");
2853}
2854
2855static void
2856i386_emit_ref (int size)
2857{
2858 switch (size)
2859 {
2860 case 1:
2861 EMIT_ASM32 (i386_ref1,
2862 "movb (%eax),%al");
2863 break;
2864 case 2:
2865 EMIT_ASM32 (i386_ref2,
2866 "movw (%eax),%ax");
2867 break;
2868 case 4:
2869 EMIT_ASM32 (i386_ref4,
2870 "movl (%eax),%eax");
2871 break;
2872 case 8:
2873 EMIT_ASM32 (i386_ref8,
2874 "movl 4(%eax),%ebx\n\t"
2875 "movl (%eax),%eax");
2876 break;
2877 }
2878}
2879
2880static void
2881i386_emit_if_goto (int *offset_p, int *size_p)
2882{
2883 EMIT_ASM32 (i386_if_goto,
2884 "mov %eax,%ecx\n\t"
2885 "or %ebx,%ecx\n\t"
2886 "pop %eax\n\t"
2887 "pop %ebx\n\t"
2888 "cmpl $0,%ecx\n\t"
2889 /* Don't trust the assembler to choose the right jump */
2890 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2891
2892 if (offset_p)
2893 *offset_p = 11; /* be sure that this matches the sequence above */
2894 if (size_p)
2895 *size_p = 4;
2896}
2897
2898static void
2899i386_emit_goto (int *offset_p, int *size_p)
2900{
2901 EMIT_ASM32 (i386_goto,
2902 /* Don't trust the assembler to choose the right jump */
2903 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2904 if (offset_p)
2905 *offset_p = 1;
2906 if (size_p)
2907 *size_p = 4;
2908}
2909
2910static void
2911i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2912{
2913 int diff = (to - (from + size));
2914 unsigned char buf[sizeof (int)];
2915
2916 /* We're only doing 4-byte sizes at the moment. */
2917 if (size != 4)
2918 {
2919 emit_error = 1;
2920 return;
2921 }
2922
2923 memcpy (buf, &diff, sizeof (int));
2924 write_inferior_memory (from, buf, sizeof (int));
2925}
2926
2927static void
4e29fb54 2928i386_emit_const (LONGEST num)
6a271cae
PA
2929{
2930 unsigned char buf[16];
b00ad6ff 2931 int i, hi, lo;
6a271cae
PA
2932 CORE_ADDR buildaddr = current_insn_ptr;
2933
2934 i = 0;
2935 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2936 lo = num & 0xffffffff;
2937 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2938 i += 4;
2939 hi = ((num >> 32) & 0xffffffff);
2940 if (hi)
2941 {
2942 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2943 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2944 i += 4;
2945 }
2946 else
2947 {
2948 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2949 }
2950 append_insns (&buildaddr, i, buf);
2951 current_insn_ptr = buildaddr;
2952}
2953
2954static void
2955i386_emit_call (CORE_ADDR fn)
2956{
2957 unsigned char buf[16];
2958 int i, offset;
2959 CORE_ADDR buildaddr;
2960
2961 buildaddr = current_insn_ptr;
2962 i = 0;
2963 buf[i++] = 0xe8; /* call <reladdr> */
2964 offset = ((int) fn) - (buildaddr + 5);
2965 memcpy (buf + 1, &offset, 4);
2966 append_insns (&buildaddr, 5, buf);
2967 current_insn_ptr = buildaddr;
2968}
2969
2970static void
2971i386_emit_reg (int reg)
2972{
2973 unsigned char buf[16];
2974 int i;
2975 CORE_ADDR buildaddr;
2976
2977 EMIT_ASM32 (i386_reg_a,
2978 "sub $0x8,%esp");
2979 buildaddr = current_insn_ptr;
2980 i = 0;
2981 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff 2982 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
2983 i += 4;
2984 append_insns (&buildaddr, i, buf);
2985 current_insn_ptr = buildaddr;
2986 EMIT_ASM32 (i386_reg_b,
2987 "mov %eax,4(%esp)\n\t"
2988 "mov 8(%ebp),%eax\n\t"
2989 "mov %eax,(%esp)");
2990 i386_emit_call (get_raw_reg_func_addr ());
2991 EMIT_ASM32 (i386_reg_c,
2992 "xor %ebx,%ebx\n\t"
2993 "lea 0x8(%esp),%esp");
2994}
2995
2996static void
2997i386_emit_pop (void)
2998{
2999 EMIT_ASM32 (i386_pop,
3000 "pop %eax\n\t"
3001 "pop %ebx");
3002}
3003
3004static void
3005i386_emit_stack_flush (void)
3006{
3007 EMIT_ASM32 (i386_stack_flush,
3008 "push %ebx\n\t"
3009 "push %eax");
3010}
3011
3012static void
3013i386_emit_zero_ext (int arg)
3014{
3015 switch (arg)
3016 {
3017 case 8:
3018 EMIT_ASM32 (i386_zero_ext_8,
3019 "and $0xff,%eax\n\t"
3020 "xor %ebx,%ebx");
3021 break;
3022 case 16:
3023 EMIT_ASM32 (i386_zero_ext_16,
3024 "and $0xffff,%eax\n\t"
3025 "xor %ebx,%ebx");
3026 break;
3027 case 32:
3028 EMIT_ASM32 (i386_zero_ext_32,
3029 "xor %ebx,%ebx");
3030 break;
3031 default:
3032 emit_error = 1;
3033 }
3034}
3035
3036static void
3037i386_emit_swap (void)
3038{
3039 EMIT_ASM32 (i386_swap,
3040 "mov %eax,%ecx\n\t"
3041 "mov %ebx,%edx\n\t"
3042 "pop %eax\n\t"
3043 "pop %ebx\n\t"
3044 "push %edx\n\t"
3045 "push %ecx");
3046}
3047
3048static void
3049i386_emit_stack_adjust (int n)
3050{
3051 unsigned char buf[16];
3052 int i;
3053 CORE_ADDR buildaddr = current_insn_ptr;
3054
3055 i = 0;
3056 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3057 buf[i++] = 0x64;
3058 buf[i++] = 0x24;
3059 buf[i++] = n * 8;
3060 append_insns (&buildaddr, i, buf);
3061 current_insn_ptr = buildaddr;
3062}
3063
3064/* FN's prototype is `LONGEST(*fn)(int)'. */
3065
3066static void
3067i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3068{
3069 unsigned char buf[16];
3070 int i;
3071 CORE_ADDR buildaddr;
3072
3073 EMIT_ASM32 (i386_int_call_1_a,
3074 /* Reserve a bit of stack space. */
3075 "sub $0x8,%esp");
3076 /* Put the one argument on the stack. */
3077 buildaddr = current_insn_ptr;
3078 i = 0;
3079 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3080 buf[i++] = 0x04;
3081 buf[i++] = 0x24;
b00ad6ff 3082 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3083 i += 4;
3084 append_insns (&buildaddr, i, buf);
3085 current_insn_ptr = buildaddr;
3086 i386_emit_call (fn);
3087 EMIT_ASM32 (i386_int_call_1_c,
3088 "mov %edx,%ebx\n\t"
3089 "lea 0x8(%esp),%esp");
3090}
3091
4e29fb54 3092/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
3093
3094static void
3095i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3096{
3097 unsigned char buf[16];
3098 int i;
3099 CORE_ADDR buildaddr;
3100
3101 EMIT_ASM32 (i386_void_call_2_a,
3102 /* Preserve %eax only; we don't have to worry about %ebx. */
3103 "push %eax\n\t"
3104 /* Reserve a bit of stack space for arguments. */
3105 "sub $0x10,%esp\n\t"
3106 /* Copy "top" to the second argument position. (Note that
3107 we can't assume function won't scribble on its
3108 arguments, so don't try to restore from this.) */
3109 "mov %eax,4(%esp)\n\t"
3110 "mov %ebx,8(%esp)");
3111 /* Put the first argument on the stack. */
3112 buildaddr = current_insn_ptr;
3113 i = 0;
3114 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3115 buf[i++] = 0x04;
3116 buf[i++] = 0x24;
b00ad6ff 3117 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
3118 i += 4;
3119 append_insns (&buildaddr, i, buf);
3120 current_insn_ptr = buildaddr;
3121 i386_emit_call (fn);
3122 EMIT_ASM32 (i386_void_call_2_b,
3123 "lea 0x10(%esp),%esp\n\t"
3124 /* Restore original stack top. */
3125 "pop %eax");
3126}
3127
6b9801d4
SS
3128
3129void
3130i386_emit_eq_goto (int *offset_p, int *size_p)
3131{
3132 EMIT_ASM32 (eq,
3133 /* Check low half first, more likely to be decider */
3134 "cmpl %eax,(%esp)\n\t"
3135 "jne .Leq_fallthru\n\t"
3136 "cmpl %ebx,4(%esp)\n\t"
3137 "jne .Leq_fallthru\n\t"
3138 "lea 0x8(%esp),%esp\n\t"
3139 "pop %eax\n\t"
3140 "pop %ebx\n\t"
3141 /* jmp, but don't trust the assembler to choose the right jump */
3142 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3143 ".Leq_fallthru:\n\t"
3144 "lea 0x8(%esp),%esp\n\t"
3145 "pop %eax\n\t"
3146 "pop %ebx");
3147
3148 if (offset_p)
3149 *offset_p = 18;
3150 if (size_p)
3151 *size_p = 4;
3152}
3153
3154void
3155i386_emit_ne_goto (int *offset_p, int *size_p)
3156{
3157 EMIT_ASM32 (ne,
3158 /* Check low half first, more likely to be decider */
3159 "cmpl %eax,(%esp)\n\t"
3160 "jne .Lne_jump\n\t"
3161 "cmpl %ebx,4(%esp)\n\t"
3162 "je .Lne_fallthru\n\t"
3163 ".Lne_jump:\n\t"
3164 "lea 0x8(%esp),%esp\n\t"
3165 "pop %eax\n\t"
3166 "pop %ebx\n\t"
3167 /* jmp, but don't trust the assembler to choose the right jump */
3168 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3169 ".Lne_fallthru:\n\t"
3170 "lea 0x8(%esp),%esp\n\t"
3171 "pop %eax\n\t"
3172 "pop %ebx");
3173
3174 if (offset_p)
3175 *offset_p = 18;
3176 if (size_p)
3177 *size_p = 4;
3178}
3179
3180void
3181i386_emit_lt_goto (int *offset_p, int *size_p)
3182{
3183 EMIT_ASM32 (lt,
3184 "cmpl %ebx,4(%esp)\n\t"
3185 "jl .Llt_jump\n\t"
3186 "jne .Llt_fallthru\n\t"
3187 "cmpl %eax,(%esp)\n\t"
3188 "jnl .Llt_fallthru\n\t"
3189 ".Llt_jump:\n\t"
3190 "lea 0x8(%esp),%esp\n\t"
3191 "pop %eax\n\t"
3192 "pop %ebx\n\t"
3193 /* jmp, but don't trust the assembler to choose the right jump */
3194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3195 ".Llt_fallthru:\n\t"
3196 "lea 0x8(%esp),%esp\n\t"
3197 "pop %eax\n\t"
3198 "pop %ebx");
3199
3200 if (offset_p)
3201 *offset_p = 20;
3202 if (size_p)
3203 *size_p = 4;
3204}
3205
3206void
3207i386_emit_le_goto (int *offset_p, int *size_p)
3208{
3209 EMIT_ASM32 (le,
3210 "cmpl %ebx,4(%esp)\n\t"
3211 "jle .Lle_jump\n\t"
3212 "jne .Lle_fallthru\n\t"
3213 "cmpl %eax,(%esp)\n\t"
3214 "jnle .Lle_fallthru\n\t"
3215 ".Lle_jump:\n\t"
3216 "lea 0x8(%esp),%esp\n\t"
3217 "pop %eax\n\t"
3218 "pop %ebx\n\t"
3219 /* jmp, but don't trust the assembler to choose the right jump */
3220 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3221 ".Lle_fallthru:\n\t"
3222 "lea 0x8(%esp),%esp\n\t"
3223 "pop %eax\n\t"
3224 "pop %ebx");
3225
3226 if (offset_p)
3227 *offset_p = 20;
3228 if (size_p)
3229 *size_p = 4;
3230}
3231
3232void
3233i386_emit_gt_goto (int *offset_p, int *size_p)
3234{
3235 EMIT_ASM32 (gt,
3236 "cmpl %ebx,4(%esp)\n\t"
3237 "jg .Lgt_jump\n\t"
3238 "jne .Lgt_fallthru\n\t"
3239 "cmpl %eax,(%esp)\n\t"
3240 "jng .Lgt_fallthru\n\t"
3241 ".Lgt_jump:\n\t"
3242 "lea 0x8(%esp),%esp\n\t"
3243 "pop %eax\n\t"
3244 "pop %ebx\n\t"
3245 /* jmp, but don't trust the assembler to choose the right jump */
3246 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3247 ".Lgt_fallthru:\n\t"
3248 "lea 0x8(%esp),%esp\n\t"
3249 "pop %eax\n\t"
3250 "pop %ebx");
3251
3252 if (offset_p)
3253 *offset_p = 20;
3254 if (size_p)
3255 *size_p = 4;
3256}
3257
3258void
3259i386_emit_ge_goto (int *offset_p, int *size_p)
3260{
3261 EMIT_ASM32 (ge,
3262 "cmpl %ebx,4(%esp)\n\t"
3263 "jge .Lge_jump\n\t"
3264 "jne .Lge_fallthru\n\t"
3265 "cmpl %eax,(%esp)\n\t"
3266 "jnge .Lge_fallthru\n\t"
3267 ".Lge_jump:\n\t"
3268 "lea 0x8(%esp),%esp\n\t"
3269 "pop %eax\n\t"
3270 "pop %ebx\n\t"
3271 /* jmp, but don't trust the assembler to choose the right jump */
3272 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3273 ".Lge_fallthru:\n\t"
3274 "lea 0x8(%esp),%esp\n\t"
3275 "pop %eax\n\t"
3276 "pop %ebx");
3277
3278 if (offset_p)
3279 *offset_p = 20;
3280 if (size_p)
3281 *size_p = 4;
3282}
3283
6a271cae
PA
3284struct emit_ops i386_emit_ops =
3285 {
3286 i386_emit_prologue,
3287 i386_emit_epilogue,
3288 i386_emit_add,
3289 i386_emit_sub,
3290 i386_emit_mul,
3291 i386_emit_lsh,
3292 i386_emit_rsh_signed,
3293 i386_emit_rsh_unsigned,
3294 i386_emit_ext,
3295 i386_emit_log_not,
3296 i386_emit_bit_and,
3297 i386_emit_bit_or,
3298 i386_emit_bit_xor,
3299 i386_emit_bit_not,
3300 i386_emit_equal,
3301 i386_emit_less_signed,
3302 i386_emit_less_unsigned,
3303 i386_emit_ref,
3304 i386_emit_if_goto,
3305 i386_emit_goto,
3306 i386_write_goto_address,
3307 i386_emit_const,
3308 i386_emit_call,
3309 i386_emit_reg,
3310 i386_emit_pop,
3311 i386_emit_stack_flush,
3312 i386_emit_zero_ext,
3313 i386_emit_swap,
3314 i386_emit_stack_adjust,
3315 i386_emit_int_call_1,
6b9801d4
SS
3316 i386_emit_void_call_2,
3317 i386_emit_eq_goto,
3318 i386_emit_ne_goto,
3319 i386_emit_lt_goto,
3320 i386_emit_le_goto,
3321 i386_emit_gt_goto,
3322 i386_emit_ge_goto
6a271cae
PA
3323 };
3324
3325
3326static struct emit_ops *
3327x86_emit_ops (void)
3328{
3329#ifdef __x86_64__
3aee8918 3330 if (is_64bit_tdesc ())
6a271cae
PA
3331 return &amd64_emit_ops;
3332 else
3333#endif
3334 return &i386_emit_ops;
3335}
3336
c2d6af84
PA
/* Range stepping is always available on x86 GNU/Linux.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3342
d0722149
DE
3343/* This is initialized assuming an amd64 target.
3344 x86_arch_setup will correct it for i386 or amd64 targets. */
3345
3346struct linux_target_ops the_low_target =
3347{
3348 x86_arch_setup,
3aee8918
PA
3349 x86_linux_regs_info,
3350 x86_cannot_fetch_register,
3351 x86_cannot_store_register,
c14dfd32 3352 NULL, /* fetch_register */
d0722149
DE
3353 x86_get_pc,
3354 x86_set_pc,
3355 x86_breakpoint,
3356 x86_breakpoint_len,
3357 NULL,
3358 1,
3359 x86_breakpoint_at,
aa5ca48f
DE
3360 x86_insert_point,
3361 x86_remove_point,
3362 x86_stopped_by_watchpoint,
3363 x86_stopped_data_address,
d0722149
DE
3364 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3365 native i386 case (no registers smaller than an xfer unit), and are not
3366 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3367 NULL,
3368 NULL,
3369 /* need to fix up i386 siginfo if host is amd64 */
3370 x86_siginfo_fixup,
aa5ca48f
DE
3371 x86_linux_new_process,
3372 x86_linux_new_thread,
1570b33e 3373 x86_linux_prepare_to_resume,
219f2f23 3374 x86_linux_process_qsupported,
fa593d66
PA
3375 x86_supports_tracepoints,
3376 x86_get_thread_area,
6a271cae 3377 x86_install_fast_tracepoint_jump_pad,
405f8e94
SS
3378 x86_emit_ops,
3379 x86_get_min_fast_tracepoint_insn_len,
c2d6af84 3380 x86_supports_range_stepping,
d0722149 3381};
3aee8918
PA
3382
3383void
3384initialize_low_arch (void)
3385{
3386 /* Initialize the Linux target descriptions. */
3387#ifdef __x86_64__
3388 init_registers_amd64_linux ();
3389 init_registers_amd64_avx_linux ();
a196ebeb
WT
3390 init_registers_amd64_mpx_linux ();
3391
3aee8918 3392 init_registers_x32_linux ();
7e5aaa09 3393 init_registers_x32_avx_linux ();
3aee8918
PA
3394
3395 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3396 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3397 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3398#endif
3399 init_registers_i386_linux ();
3400 init_registers_i386_mmx_linux ();
3401 init_registers_i386_avx_linux ();
a196ebeb 3402 init_registers_i386_mpx_linux ();
3aee8918
PA
3403
3404 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3405 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3406 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3407
3408 initialize_regsets_info (&x86_regsets_info);
3409}
This page took 0.575411 seconds and 4 git commands to generate.