PR16867, linking object with separate debug file
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
ecd75fc8 3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e 28#include "i386-xstate.h"
d0722149
DE
29
30#include "gdb_proc_service.h"
b5737fa9
PA
31/* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
33#ifndef ELFMAG0
34#include "elf/common.h"
35#endif
36
58b4daa5 37#include "agent.h"
3aee8918 38#include "tdesc.h"
c144c7a0 39#include "tracepoint.h"
f699aaba 40#include "ax.h"
d0722149 41
3aee8918 42#ifdef __x86_64__
90884b2b
L
43/* Defined in auto-generated file amd64-linux.c. */
44void init_registers_amd64_linux (void);
3aee8918
PA
45extern const struct target_desc *tdesc_amd64_linux;
46
1570b33e
L
47/* Defined in auto-generated file amd64-avx-linux.c. */
48void init_registers_amd64_avx_linux (void);
3aee8918
PA
49extern const struct target_desc *tdesc_amd64_avx_linux;
50
a196ebeb
WT
51/* Defined in auto-generated file amd64-mpx-linux.c. */
52void init_registers_amd64_mpx_linux (void);
53extern const struct target_desc *tdesc_amd64_mpx_linux;
54
4d47af5c
L
55/* Defined in auto-generated file x32-linux.c. */
56void init_registers_x32_linux (void);
3aee8918
PA
57extern const struct target_desc *tdesc_x32_linux;
58
4d47af5c
L
59/* Defined in auto-generated file x32-avx-linux.c. */
60void init_registers_x32_avx_linux (void);
3aee8918 61extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 62
3aee8918
PA
63#endif
64
65/* Defined in auto-generated file i386-linux.c. */
66void init_registers_i386_linux (void);
67extern const struct target_desc *tdesc_i386_linux;
68
69/* Defined in auto-generated file i386-mmx-linux.c. */
70void init_registers_i386_mmx_linux (void);
71extern const struct target_desc *tdesc_i386_mmx_linux;
72
73/* Defined in auto-generated file i386-avx-linux.c. */
74void init_registers_i386_avx_linux (void);
75extern const struct target_desc *tdesc_i386_avx_linux;
76
a196ebeb
WT
77/* Defined in auto-generated file i386-mpx-linux.c. */
78void init_registers_i386_mpx_linux (void);
79extern const struct target_desc *tdesc_i386_mpx_linux;
80
3aee8918
PA
81#ifdef __x86_64__
82static struct target_desc *tdesc_amd64_linux_no_xml;
83#endif
84static struct target_desc *tdesc_i386_linux_no_xml;
85
1570b33e 86
fa593d66 87static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 88static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 89
1570b33e
L
90/* Backward compatibility for gdb without XML support. */
91
92static const char *xmltarget_i386_linux_no_xml = "@<target>\
93<architecture>i386</architecture>\
94<osabi>GNU/Linux</osabi>\
95</target>";
f6d1620c
L
96
97#ifdef __x86_64__
1570b33e
L
98static const char *xmltarget_amd64_linux_no_xml = "@<target>\
99<architecture>i386:x86-64</architecture>\
100<osabi>GNU/Linux</osabi>\
101</target>";
f6d1620c 102#endif
d0722149
DE
103
104#include <sys/reg.h>
105#include <sys/procfs.h>
106#include <sys/ptrace.h>
1570b33e
L
107#include <sys/uio.h>
108
109#ifndef PTRACE_GETREGSET
110#define PTRACE_GETREGSET 0x4204
111#endif
112
113#ifndef PTRACE_SETREGSET
114#define PTRACE_SETREGSET 0x4205
115#endif
116
d0722149
DE
117
118#ifndef PTRACE_GET_THREAD_AREA
119#define PTRACE_GET_THREAD_AREA 25
120#endif
121
122/* This definition comes from prctl.h, but some kernels may not have it. */
123#ifndef PTRACE_ARCH_PRCTL
124#define PTRACE_ARCH_PRCTL 30
125#endif
126
127/* The following definitions come from prctl.h, but may be absent
128 for certain configurations. */
129#ifndef ARCH_GET_FS
130#define ARCH_SET_GS 0x1001
131#define ARCH_SET_FS 0x1002
132#define ARCH_GET_FS 0x1003
133#define ARCH_GET_GS 0x1004
134#endif
135
aa5ca48f
DE
136/* Per-process arch-specific data we want to keep. */
137
138struct arch_process_info
139{
140 struct i386_debug_reg_state debug_reg_state;
141};
142
143/* Per-thread arch-specific data we want to keep. */
144
145struct arch_lwp_info
146{
147 /* Non-zero if our copy differs from what's recorded in the thread. */
148 int debug_registers_changed;
149};
150
d0722149
DE
151#ifdef __x86_64__
152
153/* Mapping between the general-purpose registers in `struct user'
154 format and GDB's register array layout.
155 Note that the transfer layout uses 64-bit regs. */
156static /*const*/ int i386_regmap[] =
157{
158 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
159 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
160 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
161 DS * 8, ES * 8, FS * 8, GS * 8
162};
163
164#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
165
166/* So code below doesn't have to care, i386 or amd64. */
167#define ORIG_EAX ORIG_RAX
168
169static const int x86_64_regmap[] =
170{
171 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
172 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
173 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
174 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
175 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
176 DS * 8, ES * 8, FS * 8, GS * 8,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
180 -1,
181 -1, -1, -1, -1, -1, -1, -1, -1,
182 ORIG_RAX * 8,
183 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
184 -1, -1 /* MPX registers BNDCFGU, BNDSTATUS. */
d0722149
DE
185};
186
187#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
188
189#else /* ! __x86_64__ */
190
191/* Mapping between the general-purpose registers in `struct user'
192 format and GDB's register array layout. */
193static /*const*/ int i386_regmap[] =
194{
195 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
196 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
197 EIP * 4, EFL * 4, CS * 4, SS * 4,
198 DS * 4, ES * 4, FS * 4, GS * 4
199};
200
201#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
202
203#endif
3aee8918
PA
204
205#ifdef __x86_64__
206
207/* Returns true if the current inferior belongs to a x86-64 process,
208 per the tdesc. */
209
210static int
211is_64bit_tdesc (void)
212{
213 struct regcache *regcache = get_thread_regcache (current_inferior, 0);
214
215 return register_size (regcache->tdesc, 0) == 8;
216}
217
218#endif
219
d0722149
DE
220\f
221/* Called by libthread_db. */
222
223ps_err_e
224ps_get_thread_area (const struct ps_prochandle *ph,
225 lwpid_t lwpid, int idx, void **base)
226{
227#ifdef __x86_64__
3aee8918 228 int use_64bit = is_64bit_tdesc ();
d0722149
DE
229
230 if (use_64bit)
231 {
232 switch (idx)
233 {
234 case FS:
235 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
236 return PS_OK;
237 break;
238 case GS:
239 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
240 return PS_OK;
241 break;
242 default:
243 return PS_BADADDR;
244 }
245 return PS_ERR;
246 }
247#endif
248
249 {
250 unsigned int desc[4];
251
252 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
253 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
254 return PS_ERR;
255
d1ec4ce7
DE
256 /* Ensure we properly extend the value to 64-bits for x86_64. */
257 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
258 return PS_OK;
259 }
260}
fa593d66
PA
261
262/* Get the thread area address. This is used to recognize which
263 thread is which when tracing with the in-process agent library. We
264 don't read anything from the address, and treat it as opaque; it's
265 the address itself that we assume is unique per-thread. */
266
267static int
268x86_get_thread_area (int lwpid, CORE_ADDR *addr)
269{
270#ifdef __x86_64__
3aee8918 271 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
272
273 if (use_64bit)
274 {
275 void *base;
276 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
277 {
278 *addr = (CORE_ADDR) (uintptr_t) base;
279 return 0;
280 }
281
282 return -1;
283 }
284#endif
285
286 {
287 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
d86d4aaf
DE
288 struct thread_info *thr = get_lwp_thread (lwp);
289 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
290 unsigned int desc[4];
291 ULONGEST gs = 0;
292 const int reg_thread_area = 3; /* bits to scale down register value. */
293 int idx;
294
295 collect_register_by_name (regcache, "gs", &gs);
296
297 idx = gs >> reg_thread_area;
298
299 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 300 lwpid_of (thr),
493e2a69 301 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
302 return -1;
303
304 *addr = desc[1];
305 return 0;
306 }
307}
308
309
d0722149
DE
310\f
311static int
3aee8918 312x86_cannot_store_register (int regno)
d0722149 313{
3aee8918
PA
314#ifdef __x86_64__
315 if (is_64bit_tdesc ())
316 return 0;
317#endif
318
d0722149
DE
319 return regno >= I386_NUM_REGS;
320}
321
322static int
3aee8918 323x86_cannot_fetch_register (int regno)
d0722149 324{
3aee8918
PA
325#ifdef __x86_64__
326 if (is_64bit_tdesc ())
327 return 0;
328#endif
329
d0722149
DE
330 return regno >= I386_NUM_REGS;
331}
332
333static void
442ea881 334x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
335{
336 int i;
337
338#ifdef __x86_64__
3aee8918 339 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
340 {
341 for (i = 0; i < X86_64_NUM_REGS; i++)
342 if (x86_64_regmap[i] != -1)
442ea881 343 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
344 return;
345 }
346#endif
347
348 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 349 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 350
442ea881
PA
351 collect_register_by_name (regcache, "orig_eax",
352 ((char *) buf) + ORIG_EAX * 4);
d0722149
DE
353}
354
355static void
442ea881 356x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
357{
358 int i;
359
360#ifdef __x86_64__
3aee8918 361 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
362 {
363 for (i = 0; i < X86_64_NUM_REGS; i++)
364 if (x86_64_regmap[i] != -1)
442ea881 365 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
366 return;
367 }
368#endif
369
370 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 371 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 372
442ea881
PA
373 supply_register_by_name (regcache, "orig_eax",
374 ((char *) buf) + ORIG_EAX * 4);
d0722149
DE
375}
376
/* Convert the regcache's FP registers into BUF's native FP layout
   (fxsave on amd64, fsave on i386).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
386
/* Load the regcache's FP registers from BUF's native FP layout
   (fxsave on amd64, fsave on i386).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
396
397#ifndef __x86_64__
398
/* i386 only: write the regcache into BUF in fxsave layout.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
404
/* i386 only: read BUF (fxsave layout) into the regcache.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
410
411#endif
412
1570b33e
L
/* Write the regcache into BUF in xsave layout.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
418
/* Read BUF (xsave layout) into the regcache.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
424
d0722149
DE
425/* ??? The non-biarch i386 case stores all the i387 regs twice.
426 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
427 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
428 doesn't work. IWBN to avoid the duplication in the case where it
429 does work. Maybe the arch_setup routine could check whether it works
3aee8918 430 and update the supported regsets accordingly. */
d0722149 431
3aee8918 432static struct regset_info x86_regsets[] =
d0722149
DE
433{
434#ifdef HAVE_PTRACE_GETREGS
1570b33e 435 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
436 GENERAL_REGS,
437 x86_fill_gregset, x86_store_gregset },
1570b33e
L
438 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
439 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
440# ifndef __x86_64__
441# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 442 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
443 EXTENDED_REGS,
444 x86_fill_fpxregset, x86_store_fpxregset },
445# endif
446# endif
1570b33e 447 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
448 FP_REGS,
449 x86_fill_fpregset, x86_store_fpregset },
450#endif /* HAVE_PTRACE_GETREGS */
1570b33e 451 { 0, 0, 0, -1, -1, NULL, NULL }
d0722149
DE
452};
453
454static CORE_ADDR
442ea881 455x86_get_pc (struct regcache *regcache)
d0722149 456{
3aee8918 457 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
458
459 if (use_64bit)
460 {
461 unsigned long pc;
442ea881 462 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
463 return (CORE_ADDR) pc;
464 }
465 else
466 {
467 unsigned int pc;
442ea881 468 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
469 return (CORE_ADDR) pc;
470 }
471}
472
473static void
442ea881 474x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 475{
3aee8918 476 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
477
478 if (use_64bit)
479 {
480 unsigned long newpc = pc;
442ea881 481 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
482 }
483 else
484 {
485 unsigned int newpc = pc;
442ea881 486 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
487 }
488}
489\f
490static const unsigned char x86_breakpoint[] = { 0xCC };
491#define x86_breakpoint_len 1
492
493static int
494x86_breakpoint_at (CORE_ADDR pc)
495{
496 unsigned char c;
497
fc7238bb 498 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
499 if (c == 0xCC)
500 return 1;
501
502 return 0;
503}
504\f
aa5ca48f
DE
505/* Support for debug registers. */
506
507static unsigned long
508x86_linux_dr_get (ptid_t ptid, int regnum)
509{
510 int tid;
511 unsigned long value;
512
513 tid = ptid_get_lwp (ptid);
514
515 errno = 0;
516 value = ptrace (PTRACE_PEEKUSER, tid,
517 offsetof (struct user, u_debugreg[regnum]), 0);
518 if (errno != 0)
519 error ("Couldn't read debug register");
520
521 return value;
522}
523
524static void
525x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
526{
527 int tid;
528
529 tid = ptid_get_lwp (ptid);
530
531 errno = 0;
532 ptrace (PTRACE_POKEUSER, tid,
533 offsetof (struct user, u_debugreg[regnum]), value);
534 if (errno != 0)
535 error ("Couldn't write debug register");
536}
537
964e4306
PA
538static int
539update_debug_registers_callback (struct inferior_list_entry *entry,
540 void *pid_p)
541{
d86d4aaf
DE
542 struct thread_info *thr = (struct thread_info *) entry;
543 struct lwp_info *lwp = get_thread_lwp (thr);
964e4306
PA
544 int pid = *(int *) pid_p;
545
546 /* Only update the threads of this process. */
d86d4aaf 547 if (pid_of (thr) == pid)
964e4306
PA
548 {
549 /* The actual update is done later just before resuming the lwp,
550 we just mark that the registers need updating. */
551 lwp->arch_private->debug_registers_changed = 1;
552
553 /* If the lwp isn't stopped, force it to momentarily pause, so
554 we can update its debug registers. */
555 if (!lwp->stopped)
556 linux_stop_lwp (lwp);
557 }
558
559 return 0;
560}
561
aa5ca48f
DE
562/* Update the inferior's debug register REGNUM from STATE. */
563
564void
565i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
566{
964e4306 567 /* Only update the threads of this process. */
d86d4aaf 568 int pid = pid_of (current_inferior);
aa5ca48f
DE
569
570 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
571 fatal ("Invalid debug register %d", regnum);
572
d86d4aaf 573 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 574}
aa5ca48f 575
964e4306 576/* Return the inferior's debug register REGNUM. */
aa5ca48f 577
964e4306
PA
578CORE_ADDR
579i386_dr_low_get_addr (int regnum)
580{
d86d4aaf 581 ptid_t ptid = ptid_of (current_inferior);
964e4306
PA
582
583 /* DR6 and DR7 are retrieved with some other way. */
0a5b1e09 584 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
964e4306
PA
585
586 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
587}
588
589/* Update the inferior's DR7 debug control register from STATE. */
590
591void
592i386_dr_low_set_control (const struct i386_debug_reg_state *state)
593{
964e4306 594 /* Only update the threads of this process. */
d86d4aaf 595 int pid = pid_of (current_inferior);
aa5ca48f 596
d86d4aaf 597 find_inferior (&all_threads, update_debug_registers_callback, &pid);
964e4306 598}
aa5ca48f 599
964e4306
PA
600/* Return the inferior's DR7 debug control register. */
601
602unsigned
603i386_dr_low_get_control (void)
604{
d86d4aaf 605 ptid_t ptid = ptid_of (current_inferior);
964e4306
PA
606
607 return x86_linux_dr_get (ptid, DR_CONTROL);
aa5ca48f
DE
608}
609
610/* Get the value of the DR6 debug status register from the inferior
611 and record it in STATE. */
612
964e4306
PA
613unsigned
614i386_dr_low_get_status (void)
aa5ca48f 615{
d86d4aaf 616 ptid_t ptid = ptid_of (current_inferior);
aa5ca48f 617
964e4306 618 return x86_linux_dr_get (ptid, DR_STATUS);
aa5ca48f
DE
619}
620\f
90d74c30 621/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
622
623static int
624x86_insert_point (char type, CORE_ADDR addr, int len)
625{
626 struct process_info *proc = current_process ();
627 switch (type)
628 {
961bd387 629 case '0': /* software-breakpoint */
90d74c30
PA
630 {
631 int ret;
632
633 ret = prepare_to_access_memory ();
634 if (ret)
635 return -1;
636 ret = set_gdb_breakpoint_at (addr);
0146f85b 637 done_accessing_memory ();
90d74c30
PA
638 return ret;
639 }
961bd387
ME
640 case '1': /* hardware-breakpoint */
641 case '2': /* write watchpoint */
642 case '3': /* read watchpoint */
643 case '4': /* access watchpoint */
a4165e94
PA
644 {
645 enum target_hw_bp_type hw_type = Z_packet_to_hw_type (type);
646 struct i386_debug_reg_state *state
647 = &proc->private->arch_private->debug_reg_state;
648
649 return i386_low_insert_watchpoint (state, hw_type, addr, len);
650 }
961bd387 651
aa5ca48f
DE
652 default:
653 /* Unsupported. */
654 return 1;
655 }
656}
657
658static int
659x86_remove_point (char type, CORE_ADDR addr, int len)
660{
661 struct process_info *proc = current_process ();
662 switch (type)
663 {
961bd387 664 case '0': /* software-breakpoint */
90d74c30
PA
665 {
666 int ret;
667
668 ret = prepare_to_access_memory ();
669 if (ret)
670 return -1;
671 ret = delete_gdb_breakpoint_at (addr);
0146f85b 672 done_accessing_memory ();
90d74c30
PA
673 return ret;
674 }
961bd387
ME
675 case '1': /* hardware-breakpoint */
676 case '2': /* write watchpoint */
677 case '3': /* read watchpoint */
678 case '4': /* access watchpoint */
a4165e94
PA
679 {
680 enum target_hw_bp_type hw_type = Z_packet_to_hw_type (type);
681 struct i386_debug_reg_state *state
682 = &proc->private->arch_private->debug_reg_state;
683
684 return i386_low_remove_watchpoint (state, hw_type, addr, len);
685 }
aa5ca48f
DE
686 default:
687 /* Unsupported. */
688 return 1;
689 }
690}
691
692static int
693x86_stopped_by_watchpoint (void)
694{
695 struct process_info *proc = current_process ();
696 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
697}
698
699static CORE_ADDR
700x86_stopped_data_address (void)
701{
702 struct process_info *proc = current_process ();
703 CORE_ADDR addr;
704 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
705 &addr))
706 return addr;
707 return 0;
708}
709\f
710/* Called when a new process is created. */
711
712static struct arch_process_info *
713x86_linux_new_process (void)
714{
715 struct arch_process_info *info = xcalloc (1, sizeof (*info));
716
717 i386_low_init_dregs (&info->debug_reg_state);
718
719 return info;
720}
721
722/* Called when a new thread is detected. */
723
724static struct arch_lwp_info *
725x86_linux_new_thread (void)
726{
727 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
728
729 info->debug_registers_changed = 1;
730
731 return info;
732}
733
734/* Called when resuming a thread.
735 If the debug regs have changed, update the thread's copies. */
736
737static void
738x86_linux_prepare_to_resume (struct lwp_info *lwp)
739{
d86d4aaf 740 ptid_t ptid = ptid_of (get_lwp_thread (lwp));
6210a125 741 int clear_status = 0;
b9a881c2 742
aa5ca48f
DE
743 if (lwp->arch_private->debug_registers_changed)
744 {
745 int i;
aa5ca48f
DE
746 int pid = ptid_get_pid (ptid);
747 struct process_info *proc = find_process_pid (pid);
493e2a69
MS
748 struct i386_debug_reg_state *state
749 = &proc->private->arch_private->debug_reg_state;
aa5ca48f
DE
750
751 for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
6210a125
PA
752 if (state->dr_ref_count[i] > 0)
753 {
754 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
755
756 /* If we're setting a watchpoint, any change the inferior
757 had done itself to the debug registers needs to be
758 discarded, otherwise, i386_low_stopped_data_address can
759 get confused. */
760 clear_status = 1;
761 }
aa5ca48f
DE
762
763 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
764
765 lwp->arch_private->debug_registers_changed = 0;
766 }
b9a881c2 767
6210a125 768 if (clear_status || lwp->stopped_by_watchpoint)
b9a881c2 769 x86_linux_dr_set (ptid, DR_STATUS, 0);
aa5ca48f
DE
770}
771\f
d0722149
DE
772/* When GDBSERVER is built as a 64-bit application on linux, the
773 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
774 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
775 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
776 conversion in-place ourselves. */
777
778/* These types below (compat_*) define a siginfo type that is layout
779 compatible with the siginfo type exported by the 32-bit userspace
780 support. */
781
782#ifdef __x86_64__
783
784typedef int compat_int_t;
785typedef unsigned int compat_uptr_t;
786
787typedef int compat_time_t;
788typedef int compat_timer_t;
789typedef int compat_clock_t;
790
791struct compat_timeval
792{
793 compat_time_t tv_sec;
794 int tv_usec;
795};
796
797typedef union compat_sigval
798{
799 compat_int_t sival_int;
800 compat_uptr_t sival_ptr;
801} compat_sigval_t;
802
803typedef struct compat_siginfo
804{
805 int si_signo;
806 int si_errno;
807 int si_code;
808
809 union
810 {
811 int _pad[((128 / sizeof (int)) - 3)];
812
813 /* kill() */
814 struct
815 {
816 unsigned int _pid;
817 unsigned int _uid;
818 } _kill;
819
820 /* POSIX.1b timers */
821 struct
822 {
823 compat_timer_t _tid;
824 int _overrun;
825 compat_sigval_t _sigval;
826 } _timer;
827
828 /* POSIX.1b signals */
829 struct
830 {
831 unsigned int _pid;
832 unsigned int _uid;
833 compat_sigval_t _sigval;
834 } _rt;
835
836 /* SIGCHLD */
837 struct
838 {
839 unsigned int _pid;
840 unsigned int _uid;
841 int _status;
842 compat_clock_t _utime;
843 compat_clock_t _stime;
844 } _sigchld;
845
846 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
847 struct
848 {
849 unsigned int _addr;
850 } _sigfault;
851
852 /* SIGPOLL */
853 struct
854 {
855 int _band;
856 int _fd;
857 } _sigpoll;
858 } _sifields;
859} compat_siginfo_t;
860
c92b5177
L
861/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
862typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
863
864typedef struct compat_x32_siginfo
865{
866 int si_signo;
867 int si_errno;
868 int si_code;
869
870 union
871 {
872 int _pad[((128 / sizeof (int)) - 3)];
873
874 /* kill() */
875 struct
876 {
877 unsigned int _pid;
878 unsigned int _uid;
879 } _kill;
880
881 /* POSIX.1b timers */
882 struct
883 {
884 compat_timer_t _tid;
885 int _overrun;
886 compat_sigval_t _sigval;
887 } _timer;
888
889 /* POSIX.1b signals */
890 struct
891 {
892 unsigned int _pid;
893 unsigned int _uid;
894 compat_sigval_t _sigval;
895 } _rt;
896
897 /* SIGCHLD */
898 struct
899 {
900 unsigned int _pid;
901 unsigned int _uid;
902 int _status;
903 compat_x32_clock_t _utime;
904 compat_x32_clock_t _stime;
905 } _sigchld;
906
907 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
908 struct
909 {
910 unsigned int _addr;
911 } _sigfault;
912
913 /* SIGPOLL */
914 struct
915 {
916 int _band;
917 int _fd;
918 } _sigpoll;
919 } _sifields;
920} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
921
d0722149
DE
922#define cpt_si_pid _sifields._kill._pid
923#define cpt_si_uid _sifields._kill._uid
924#define cpt_si_timerid _sifields._timer._tid
925#define cpt_si_overrun _sifields._timer._overrun
926#define cpt_si_status _sifields._sigchld._status
927#define cpt_si_utime _sifields._sigchld._utime
928#define cpt_si_stime _sifields._sigchld._stime
929#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
930#define cpt_si_addr _sifields._sigfault._addr
931#define cpt_si_band _sifields._sigpoll._band
932#define cpt_si_fd _sifields._sigpoll._fd
933
934/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
935 In their place is si_timer1,si_timer2. */
936#ifndef si_timerid
937#define si_timerid si_timer1
938#endif
939#ifndef si_overrun
940#define si_overrun si_timer2
941#endif
942
943static void
944compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
945{
946 memset (to, 0, sizeof (*to));
947
948 to->si_signo = from->si_signo;
949 to->si_errno = from->si_errno;
950 to->si_code = from->si_code;
951
b53a1623 952 if (to->si_code == SI_TIMER)
d0722149 953 {
b53a1623
PA
954 to->cpt_si_timerid = from->si_timerid;
955 to->cpt_si_overrun = from->si_overrun;
d0722149
DE
956 to->cpt_si_ptr = (intptr_t) from->si_ptr;
957 }
958 else if (to->si_code == SI_USER)
959 {
960 to->cpt_si_pid = from->si_pid;
961 to->cpt_si_uid = from->si_uid;
962 }
b53a1623 963 else if (to->si_code < 0)
d0722149 964 {
b53a1623
PA
965 to->cpt_si_pid = from->si_pid;
966 to->cpt_si_uid = from->si_uid;
d0722149
DE
967 to->cpt_si_ptr = (intptr_t) from->si_ptr;
968 }
969 else
970 {
971 switch (to->si_signo)
972 {
973 case SIGCHLD:
974 to->cpt_si_pid = from->si_pid;
975 to->cpt_si_uid = from->si_uid;
976 to->cpt_si_status = from->si_status;
977 to->cpt_si_utime = from->si_utime;
978 to->cpt_si_stime = from->si_stime;
979 break;
980 case SIGILL:
981 case SIGFPE:
982 case SIGSEGV:
983 case SIGBUS:
984 to->cpt_si_addr = (intptr_t) from->si_addr;
985 break;
986 case SIGPOLL:
987 to->cpt_si_band = from->si_band;
988 to->cpt_si_fd = from->si_fd;
989 break;
990 default:
991 to->cpt_si_pid = from->si_pid;
992 to->cpt_si_uid = from->si_uid;
993 to->cpt_si_ptr = (intptr_t) from->si_ptr;
994 break;
995 }
996 }
997}
998
999static void
1000siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
1001{
1002 memset (to, 0, sizeof (*to));
1003
1004 to->si_signo = from->si_signo;
1005 to->si_errno = from->si_errno;
1006 to->si_code = from->si_code;
1007
b53a1623 1008 if (to->si_code == SI_TIMER)
d0722149 1009 {
b53a1623
PA
1010 to->si_timerid = from->cpt_si_timerid;
1011 to->si_overrun = from->cpt_si_overrun;
d0722149
DE
1012 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1013 }
1014 else if (to->si_code == SI_USER)
1015 {
1016 to->si_pid = from->cpt_si_pid;
1017 to->si_uid = from->cpt_si_uid;
1018 }
b53a1623 1019 else if (to->si_code < 0)
d0722149 1020 {
b53a1623
PA
1021 to->si_pid = from->cpt_si_pid;
1022 to->si_uid = from->cpt_si_uid;
d0722149
DE
1023 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1024 }
1025 else
1026 {
1027 switch (to->si_signo)
1028 {
1029 case SIGCHLD:
1030 to->si_pid = from->cpt_si_pid;
1031 to->si_uid = from->cpt_si_uid;
1032 to->si_status = from->cpt_si_status;
1033 to->si_utime = from->cpt_si_utime;
1034 to->si_stime = from->cpt_si_stime;
1035 break;
1036 case SIGILL:
1037 case SIGFPE:
1038 case SIGSEGV:
1039 case SIGBUS:
1040 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1041 break;
1042 case SIGPOLL:
1043 to->si_band = from->cpt_si_band;
1044 to->si_fd = from->cpt_si_fd;
1045 break;
1046 default:
1047 to->si_pid = from->cpt_si_pid;
1048 to->si_uid = from->cpt_si_uid;
1049 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1050 break;
1051 }
1052 }
1053}
1054
c92b5177
L
1055static void
1056compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1057 siginfo_t *from)
1058{
1059 memset (to, 0, sizeof (*to));
1060
1061 to->si_signo = from->si_signo;
1062 to->si_errno = from->si_errno;
1063 to->si_code = from->si_code;
1064
1065 if (to->si_code == SI_TIMER)
1066 {
1067 to->cpt_si_timerid = from->si_timerid;
1068 to->cpt_si_overrun = from->si_overrun;
1069 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1070 }
1071 else if (to->si_code == SI_USER)
1072 {
1073 to->cpt_si_pid = from->si_pid;
1074 to->cpt_si_uid = from->si_uid;
1075 }
1076 else if (to->si_code < 0)
1077 {
1078 to->cpt_si_pid = from->si_pid;
1079 to->cpt_si_uid = from->si_uid;
1080 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1081 }
1082 else
1083 {
1084 switch (to->si_signo)
1085 {
1086 case SIGCHLD:
1087 to->cpt_si_pid = from->si_pid;
1088 to->cpt_si_uid = from->si_uid;
1089 to->cpt_si_status = from->si_status;
1090 to->cpt_si_utime = from->si_utime;
1091 to->cpt_si_stime = from->si_stime;
1092 break;
1093 case SIGILL:
1094 case SIGFPE:
1095 case SIGSEGV:
1096 case SIGBUS:
1097 to->cpt_si_addr = (intptr_t) from->si_addr;
1098 break;
1099 case SIGPOLL:
1100 to->cpt_si_band = from->si_band;
1101 to->cpt_si_fd = from->si_fd;
1102 break;
1103 default:
1104 to->cpt_si_pid = from->si_pid;
1105 to->cpt_si_uid = from->si_uid;
1106 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1107 break;
1108 }
1109 }
1110}
1111
1112static void
1113siginfo_from_compat_x32_siginfo (siginfo_t *to,
1114 compat_x32_siginfo_t *from)
1115{
1116 memset (to, 0, sizeof (*to));
1117
1118 to->si_signo = from->si_signo;
1119 to->si_errno = from->si_errno;
1120 to->si_code = from->si_code;
1121
1122 if (to->si_code == SI_TIMER)
1123 {
1124 to->si_timerid = from->cpt_si_timerid;
1125 to->si_overrun = from->cpt_si_overrun;
1126 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1127 }
1128 else if (to->si_code == SI_USER)
1129 {
1130 to->si_pid = from->cpt_si_pid;
1131 to->si_uid = from->cpt_si_uid;
1132 }
1133 else if (to->si_code < 0)
1134 {
1135 to->si_pid = from->cpt_si_pid;
1136 to->si_uid = from->cpt_si_uid;
1137 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1138 }
1139 else
1140 {
1141 switch (to->si_signo)
1142 {
1143 case SIGCHLD:
1144 to->si_pid = from->cpt_si_pid;
1145 to->si_uid = from->cpt_si_uid;
1146 to->si_status = from->cpt_si_status;
1147 to->si_utime = from->cpt_si_utime;
1148 to->si_stime = from->cpt_si_stime;
1149 break;
1150 case SIGILL:
1151 case SIGFPE:
1152 case SIGSEGV:
1153 case SIGBUS:
1154 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1155 break;
1156 case SIGPOLL:
1157 to->si_band = from->cpt_si_band;
1158 to->si_fd = from->cpt_si_fd;
1159 break;
1160 default:
1161 to->si_pid = from->cpt_si_pid;
1162 to->si_uid = from->cpt_si_uid;
1163 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1164 break;
1165 }
1166 }
1167}
1168
d0722149
DE
1169#endif /* __x86_64__ */
1170
1171/* Convert a native/host siginfo object, into/from the siginfo in the
1172 layout of the inferiors' architecture. Returns true if any
1173 conversion was done; false otherwise. If DIRECTION is 1, then copy
1174 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1175 INF. */
1176
1177static int
a5362b9a 1178x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
d0722149
DE
1179{
1180#ifdef __x86_64__
760256f9 1181 unsigned int machine;
d86d4aaf 1182 int tid = lwpid_of (current_inferior);
760256f9
PA
1183 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1184
d0722149 1185 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 1186 if (!is_64bit_tdesc ())
d0722149 1187 {
a5362b9a 1188 if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
9f1036c1 1189 fatal ("unexpected difference in siginfo");
d0722149
DE
1190
1191 if (direction == 0)
1192 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1193 else
1194 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1195
c92b5177
L
1196 return 1;
1197 }
1198 /* No fixup for native x32 GDB. */
760256f9 1199 else if (!is_elf64 && sizeof (void *) == 8)
c92b5177
L
1200 {
1201 if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
1202 fatal ("unexpected difference in siginfo");
1203
1204 if (direction == 0)
1205 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1206 native);
1207 else
1208 siginfo_from_compat_x32_siginfo (native,
1209 (struct compat_x32_siginfo *) inf);
1210
d0722149
DE
1211 return 1;
1212 }
1213#endif
1214
1215 return 0;
1216}
1217\f
1570b33e
L
1218static int use_xml;
1219
3aee8918
PA
1220/* Format of XSAVE extended state is:
1221 struct
1222 {
1223 fxsave_bytes[0..463]
1224 sw_usable_bytes[464..511]
1225 xstate_hdr_bytes[512..575]
1226 avx_bytes[576..831]
1227 future_state etc
1228 };
1229
1230 Same memory layout will be used for the coredump NT_X86_XSTATE
1231 representing the XSAVE extended state registers.
1232
1233 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1234 extended state mask, which is the same as the extended control register
1235 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1236 together with the mask saved in the xstate_hdr_bytes to determine what
1237 states the processor/OS supports and what state, used or initialized,
1238 the process/thread is in. */
1239#define I386_LINUX_XSAVE_XCR0_OFFSET 464
1240
1241/* Does the current host support the GETFPXREGS request? The header
1242 file may or may not define it, and even if it is defined, the
1243 kernel will return EIO if it's running on a pre-SSE processor. */
1244int have_ptrace_getfpxregs =
1245#ifdef HAVE_PTRACE_GETFPXREGS
1246 -1
1247#else
1248 0
1249#endif
1250;
1570b33e 1251
3aee8918
PA
1252/* Does the current host support PTRACE_GETREGSET? */
1253static int have_ptrace_getregset = -1;
1254
1255/* Get Linux/x86 target description from running target. */
1256
1257static const struct target_desc *
1258x86_linux_read_description (void)
1570b33e 1259{
3aee8918
PA
1260 unsigned int machine;
1261 int is_elf64;
a196ebeb 1262 int xcr0_features;
3aee8918
PA
1263 int tid;
1264 static uint64_t xcr0;
3a13a53b 1265 struct regset_info *regset;
1570b33e 1266
d86d4aaf 1267 tid = lwpid_of (current_inferior);
1570b33e 1268
3aee8918 1269 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
45ba0d02 1270
3aee8918 1271 if (sizeof (void *) == 4)
3a13a53b 1272 {
3aee8918
PA
1273 if (is_elf64 > 0)
1274 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1275#ifndef __x86_64__
1276 else if (machine == EM_X86_64)
1277 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1278#endif
1279 }
3a13a53b 1280
3aee8918
PA
1281#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1282 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1283 {
1284 elf_fpxregset_t fpxregs;
3a13a53b 1285
3aee8918 1286 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
3a13a53b 1287 {
3aee8918
PA
1288 have_ptrace_getfpxregs = 0;
1289 have_ptrace_getregset = 0;
1290 return tdesc_i386_mmx_linux;
3a13a53b 1291 }
3aee8918
PA
1292 else
1293 have_ptrace_getfpxregs = 1;
3a13a53b 1294 }
1570b33e
L
1295#endif
1296
1297 if (!use_xml)
1298 {
3aee8918
PA
1299 x86_xcr0 = I386_XSTATE_SSE_MASK;
1300
1570b33e
L
1301 /* Don't use XML. */
1302#ifdef __x86_64__
3aee8918
PA
1303 if (machine == EM_X86_64)
1304 return tdesc_amd64_linux_no_xml;
1570b33e 1305 else
1570b33e 1306#endif
3aee8918 1307 return tdesc_i386_linux_no_xml;
1570b33e
L
1308 }
1309
1570b33e
L
1310 if (have_ptrace_getregset == -1)
1311 {
3aee8918 1312 uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1570b33e 1313 struct iovec iov;
1570b33e
L
1314
1315 iov.iov_base = xstateregs;
1316 iov.iov_len = sizeof (xstateregs);
1317
1318 /* Check if PTRACE_GETREGSET works. */
3aee8918
PA
1319 if (ptrace (PTRACE_GETREGSET, tid,
1320 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1321 have_ptrace_getregset = 0;
1322 else
1570b33e 1323 {
3aee8918
PA
1324 have_ptrace_getregset = 1;
1325
1326 /* Get XCR0 from XSAVE extended state. */
1327 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1328 / sizeof (uint64_t))];
1329
1330 /* Use PTRACE_GETREGSET if it is available. */
1331 for (regset = x86_regsets;
1332 regset->fill_function != NULL; regset++)
1333 if (regset->get_request == PTRACE_GETREGSET)
1334 regset->size = I386_XSTATE_SIZE (xcr0);
1335 else if (regset->type != GENERAL_REGS)
1336 regset->size = 0;
1570b33e 1337 }
1570b33e
L
1338 }
1339
3aee8918 1340 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
a196ebeb
WT
1341 xcr0_features = (have_ptrace_getregset
1342 && (xcr0 & I386_XSTATE_ALL_MASK));
3aee8918 1343
a196ebeb 1344 if (xcr0_features)
3aee8918 1345 x86_xcr0 = xcr0;
1570b33e 1346
3aee8918
PA
1347 if (machine == EM_X86_64)
1348 {
1570b33e 1349#ifdef __x86_64__
a196ebeb 1350 if (is_elf64)
3aee8918 1351 {
a196ebeb
WT
1352 if (xcr0_features)
1353 {
1354 switch (xcr0 & I386_XSTATE_ALL_MASK)
1355 {
1356 case I386_XSTATE_MPX_MASK:
1357 return tdesc_amd64_mpx_linux;
1358
1359 case I386_XSTATE_AVX_MASK:
1360 return tdesc_amd64_avx_linux;
1361
1362 default:
1363 return tdesc_amd64_linux;
1364 }
1365 }
4d47af5c 1366 else
a196ebeb 1367 return tdesc_amd64_linux;
3aee8918
PA
1368 }
1369 else
1370 {
a196ebeb
WT
1371 if (xcr0_features)
1372 {
1373 switch (xcr0 & I386_XSTATE_ALL_MASK)
1374 {
1375 case I386_XSTATE_MPX_MASK: /* No MPX on x32. */
1376 case I386_XSTATE_AVX_MASK:
1377 return tdesc_x32_avx_linux;
1378
1379 default:
1380 return tdesc_x32_linux;
1381 }
1382 }
3aee8918 1383 else
a196ebeb 1384 return tdesc_x32_linux;
1570b33e 1385 }
3aee8918 1386#endif
1570b33e 1387 }
3aee8918
PA
1388 else
1389 {
a196ebeb
WT
1390 if (xcr0_features)
1391 {
1392 switch (xcr0 & I386_XSTATE_ALL_MASK)
1393 {
1394 case (I386_XSTATE_MPX_MASK):
1395 return tdesc_i386_mpx_linux;
1396
1397 case (I386_XSTATE_AVX_MASK):
1398 return tdesc_i386_avx_linux;
1399
1400 default:
1401 return tdesc_i386_linux;
1402 }
1403 }
3aee8918
PA
1404 else
1405 return tdesc_i386_linux;
1406 }
1407
1408 gdb_assert_not_reached ("failed to return tdesc");
1409}
1410
1411/* Callback for find_inferior. Stops iteration when a thread with a
1412 given PID is found. */
1413
1414static int
1415same_process_callback (struct inferior_list_entry *entry, void *data)
1416{
1417 int pid = *(int *) data;
1418
1419 return (ptid_get_pid (entry->id) == pid);
1420}
1421
1422/* Callback for for_each_inferior. Calls the arch_setup routine for
1423 each process. */
1424
1425static void
1426x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1427{
1428 int pid = ptid_get_pid (entry->id);
1429
1430 /* Look up any thread of this processes. */
1431 current_inferior
1432 = (struct thread_info *) find_inferior (&all_threads,
1433 same_process_callback, &pid);
1434
1435 the_low_target.arch_setup ();
1436}
1437
1438/* Update all the target description of all processes; a new GDB
1439 connected, and it may or not support xml target descriptions. */
1440
1441static void
1442x86_linux_update_xmltarget (void)
1443{
1444 struct thread_info *save_inferior = current_inferior;
1445
1446 /* Before changing the register cache's internal layout, flush the
1447 contents of the current valid caches back to the threads, and
1448 release the current regcache objects. */
1449 regcache_release ();
1450
1451 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1452
1453 current_inferior = save_inferior;
1570b33e
L
1454}
1455
1456/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1457 PTRACE_GETREGSET. */
1458
1459static void
1460x86_linux_process_qsupported (const char *query)
1461{
1462 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1463 with "i386" in qSupported query, it supports x86 XML target
1464 descriptions. */
1465 use_xml = 0;
1466 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1467 {
1468 char *copy = xstrdup (query + 13);
1469 char *p;
1470
1471 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1472 {
1473 if (strcmp (p, "i386") == 0)
1474 {
1475 use_xml = 1;
1476 break;
1477 }
1478 }
1479
1480 free (copy);
1481 }
1482
1483 x86_linux_update_xmltarget ();
1484}
1485
3aee8918 1486/* Common for x86/x86-64. */
d0722149 1487
3aee8918
PA
1488static struct regsets_info x86_regsets_info =
1489 {
1490 x86_regsets, /* regsets */
1491 0, /* num_regsets */
1492 NULL, /* disabled_regsets */
1493 };
214d508e
L
1494
1495#ifdef __x86_64__
3aee8918
PA
1496static struct regs_info amd64_linux_regs_info =
1497 {
1498 NULL, /* regset_bitmap */
1499 NULL, /* usrregs_info */
1500 &x86_regsets_info
1501 };
d0722149 1502#endif
3aee8918
PA
1503static struct usrregs_info i386_linux_usrregs_info =
1504 {
1505 I386_NUM_REGS,
1506 i386_regmap,
1507 };
d0722149 1508
3aee8918
PA
1509static struct regs_info i386_linux_regs_info =
1510 {
1511 NULL, /* regset_bitmap */
1512 &i386_linux_usrregs_info,
1513 &x86_regsets_info
1514 };
d0722149 1515
3aee8918
PA
1516const struct regs_info *
1517x86_linux_regs_info (void)
1518{
1519#ifdef __x86_64__
1520 if (is_64bit_tdesc ())
1521 return &amd64_linux_regs_info;
1522 else
1523#endif
1524 return &i386_linux_regs_info;
1525}
d0722149 1526
3aee8918
PA
1527/* Initialize the target description for the architecture of the
1528 inferior. */
1570b33e 1529
3aee8918
PA
1530static void
1531x86_arch_setup (void)
1532{
1533 current_process ()->tdesc = x86_linux_read_description ();
d0722149
DE
1534}
1535
219f2f23
PA
/* Tracepoints are always supported on x86/x86-64 GDBserver.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1541
fa593d66
PA
1542static void
1543append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1544{
1545 write_inferior_memory (*to, buf, len);
1546 *to += len;
1547}
1548
/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 83 ec 18", into BUF.  Decoding stops at the first token that is
   not a hex number.  Returns the number of bytes stored.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *cursor = op;

  for (;;)
    {
      char *after;
      unsigned long byte = strtoul (cursor, &after, 16);

      /* No further hex token: done.  */
      if (after == cursor)
	break;

      *out++ = byte;
      cursor = after;
    }

  return out - buf;
}
1568
1569#ifdef __x86_64__
1570
1571/* Build a jump pad that saves registers and calls a collection
1572 function. Writes a jump instruction to the jump pad to
1573 JJUMPAD_INSN. The caller is responsible to write it in at the
1574 tracepoint address. */
1575
1576static int
1577amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1578 CORE_ADDR collector,
1579 CORE_ADDR lockaddr,
1580 ULONGEST orig_size,
1581 CORE_ADDR *jump_entry,
405f8e94
SS
1582 CORE_ADDR *trampoline,
1583 ULONGEST *trampoline_size,
fa593d66
PA
1584 unsigned char *jjump_pad_insn,
1585 ULONGEST *jjump_pad_insn_size,
1586 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1587 CORE_ADDR *adjusted_insn_addr_end,
1588 char *err)
fa593d66
PA
1589{
1590 unsigned char buf[40];
1591 int i, offset;
f4647387
YQ
1592 int64_t loffset;
1593
fa593d66
PA
1594 CORE_ADDR buildaddr = *jump_entry;
1595
1596 /* Build the jump pad. */
1597
1598 /* First, do tracepoint data collection. Save registers. */
1599 i = 0;
1600 /* Need to ensure stack pointer saved first. */
1601 buf[i++] = 0x54; /* push %rsp */
1602 buf[i++] = 0x55; /* push %rbp */
1603 buf[i++] = 0x57; /* push %rdi */
1604 buf[i++] = 0x56; /* push %rsi */
1605 buf[i++] = 0x52; /* push %rdx */
1606 buf[i++] = 0x51; /* push %rcx */
1607 buf[i++] = 0x53; /* push %rbx */
1608 buf[i++] = 0x50; /* push %rax */
1609 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1610 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1611 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1612 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1613 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1614 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1615 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1616 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1617 buf[i++] = 0x9c; /* pushfq */
1618 buf[i++] = 0x48; /* movl <addr>,%rdi */
1619 buf[i++] = 0xbf;
1620 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1621 i += sizeof (unsigned long);
1622 buf[i++] = 0x57; /* push %rdi */
1623 append_insns (&buildaddr, i, buf);
1624
1625 /* Stack space for the collecting_t object. */
1626 i = 0;
1627 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1628 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1629 memcpy (buf + i, &tpoint, 8);
1630 i += 8;
1631 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1632 i += push_opcode (&buf[i],
1633 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1634 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1635 append_insns (&buildaddr, i, buf);
1636
1637 /* spin-lock. */
1638 i = 0;
1639 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1640 memcpy (&buf[i], (void *) &lockaddr, 8);
1641 i += 8;
1642 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1643 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1644 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1645 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1646 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1647 append_insns (&buildaddr, i, buf);
1648
1649 /* Set up the gdb_collect call. */
1650 /* At this point, (stack pointer + 0x18) is the base of our saved
1651 register block. */
1652
1653 i = 0;
1654 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1655 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1656
1657 /* tpoint address may be 64-bit wide. */
1658 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1659 memcpy (buf + i, &tpoint, 8);
1660 i += 8;
1661 append_insns (&buildaddr, i, buf);
1662
1663 /* The collector function being in the shared library, may be
1664 >31-bits away off the jump pad. */
1665 i = 0;
1666 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1667 memcpy (buf + i, &collector, 8);
1668 i += 8;
1669 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1670 append_insns (&buildaddr, i, buf);
1671
1672 /* Clear the spin-lock. */
1673 i = 0;
1674 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1675 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1676 memcpy (buf + i, &lockaddr, 8);
1677 i += 8;
1678 append_insns (&buildaddr, i, buf);
1679
1680 /* Remove stack that had been used for the collect_t object. */
1681 i = 0;
1682 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1683 append_insns (&buildaddr, i, buf);
1684
1685 /* Restore register state. */
1686 i = 0;
1687 buf[i++] = 0x48; /* add $0x8,%rsp */
1688 buf[i++] = 0x83;
1689 buf[i++] = 0xc4;
1690 buf[i++] = 0x08;
1691 buf[i++] = 0x9d; /* popfq */
1692 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1693 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1694 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1695 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1696 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1697 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1698 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1699 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1700 buf[i++] = 0x58; /* pop %rax */
1701 buf[i++] = 0x5b; /* pop %rbx */
1702 buf[i++] = 0x59; /* pop %rcx */
1703 buf[i++] = 0x5a; /* pop %rdx */
1704 buf[i++] = 0x5e; /* pop %rsi */
1705 buf[i++] = 0x5f; /* pop %rdi */
1706 buf[i++] = 0x5d; /* pop %rbp */
1707 buf[i++] = 0x5c; /* pop %rsp */
1708 append_insns (&buildaddr, i, buf);
1709
1710 /* Now, adjust the original instruction to execute in the jump
1711 pad. */
1712 *adjusted_insn_addr = buildaddr;
1713 relocate_instruction (&buildaddr, tpaddr);
1714 *adjusted_insn_addr_end = buildaddr;
1715
1716 /* Finally, write a jump back to the program. */
f4647387
YQ
1717
1718 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1719 if (loffset > INT_MAX || loffset < INT_MIN)
1720 {
1721 sprintf (err,
1722 "E.Jump back from jump pad too far from tracepoint "
1723 "(offset 0x%" PRIx64 " > int32).", loffset);
1724 return 1;
1725 }
1726
1727 offset = (int) loffset;
fa593d66
PA
1728 memcpy (buf, jump_insn, sizeof (jump_insn));
1729 memcpy (buf + 1, &offset, 4);
1730 append_insns (&buildaddr, sizeof (jump_insn), buf);
1731
1732 /* The jump pad is now built. Wire in a jump to our jump pad. This
1733 is always done last (by our caller actually), so that we can
1734 install fast tracepoints with threads running. This relies on
1735 the agent's atomic write support. */
f4647387
YQ
1736 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1737 if (loffset > INT_MAX || loffset < INT_MIN)
1738 {
1739 sprintf (err,
1740 "E.Jump pad too far from tracepoint "
1741 "(offset 0x%" PRIx64 " > int32).", loffset);
1742 return 1;
1743 }
1744
1745 offset = (int) loffset;
1746
fa593d66
PA
1747 memcpy (buf, jump_insn, sizeof (jump_insn));
1748 memcpy (buf + 1, &offset, 4);
1749 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1750 *jjump_pad_insn_size = sizeof (jump_insn);
1751
1752 /* Return the end address of our pad. */
1753 *jump_entry = buildaddr;
1754
1755 return 0;
1756}
1757
1758#endif /* __x86_64__ */
1759
1760/* Build a jump pad that saves registers and calls a collection
1761 function. Writes a jump instruction to the jump pad to
1762 JJUMPAD_INSN. The caller is responsible to write it in at the
1763 tracepoint address. */
1764
1765static int
1766i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1767 CORE_ADDR collector,
1768 CORE_ADDR lockaddr,
1769 ULONGEST orig_size,
1770 CORE_ADDR *jump_entry,
405f8e94
SS
1771 CORE_ADDR *trampoline,
1772 ULONGEST *trampoline_size,
fa593d66
PA
1773 unsigned char *jjump_pad_insn,
1774 ULONGEST *jjump_pad_insn_size,
1775 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1776 CORE_ADDR *adjusted_insn_addr_end,
1777 char *err)
fa593d66
PA
1778{
1779 unsigned char buf[0x100];
1780 int i, offset;
1781 CORE_ADDR buildaddr = *jump_entry;
1782
1783 /* Build the jump pad. */
1784
1785 /* First, do tracepoint data collection. Save registers. */
1786 i = 0;
1787 buf[i++] = 0x60; /* pushad */
1788 buf[i++] = 0x68; /* push tpaddr aka $pc */
1789 *((int *)(buf + i)) = (int) tpaddr;
1790 i += 4;
1791 buf[i++] = 0x9c; /* pushf */
1792 buf[i++] = 0x1e; /* push %ds */
1793 buf[i++] = 0x06; /* push %es */
1794 buf[i++] = 0x0f; /* push %fs */
1795 buf[i++] = 0xa0;
1796 buf[i++] = 0x0f; /* push %gs */
1797 buf[i++] = 0xa8;
1798 buf[i++] = 0x16; /* push %ss */
1799 buf[i++] = 0x0e; /* push %cs */
1800 append_insns (&buildaddr, i, buf);
1801
1802 /* Stack space for the collecting_t object. */
1803 i = 0;
1804 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1805
1806 /* Build the object. */
1807 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1808 memcpy (buf + i, &tpoint, 4);
1809 i += 4;
1810 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1811
1812 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1813 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1814 append_insns (&buildaddr, i, buf);
1815
1816 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1817 If we cared for it, this could be using xchg alternatively. */
1818
1819 i = 0;
1820 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1821 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1822 %esp,<lockaddr> */
1823 memcpy (&buf[i], (void *) &lockaddr, 4);
1824 i += 4;
1825 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1826 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1827 append_insns (&buildaddr, i, buf);
1828
1829
1830 /* Set up arguments to the gdb_collect call. */
1831 i = 0;
1832 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1833 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1834 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1835 append_insns (&buildaddr, i, buf);
1836
1837 i = 0;
1838 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1839 append_insns (&buildaddr, i, buf);
1840
1841 i = 0;
1842 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1843 memcpy (&buf[i], (void *) &tpoint, 4);
1844 i += 4;
1845 append_insns (&buildaddr, i, buf);
1846
1847 buf[0] = 0xe8; /* call <reladdr> */
1848 offset = collector - (buildaddr + sizeof (jump_insn));
1849 memcpy (buf + 1, &offset, 4);
1850 append_insns (&buildaddr, 5, buf);
1851 /* Clean up after the call. */
1852 buf[0] = 0x83; /* add $0x8,%esp */
1853 buf[1] = 0xc4;
1854 buf[2] = 0x08;
1855 append_insns (&buildaddr, 3, buf);
1856
1857
1858 /* Clear the spin-lock. This would need the LOCK prefix on older
1859 broken archs. */
1860 i = 0;
1861 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1862 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1863 memcpy (buf + i, &lockaddr, 4);
1864 i += 4;
1865 append_insns (&buildaddr, i, buf);
1866
1867
1868 /* Remove stack that had been used for the collect_t object. */
1869 i = 0;
1870 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1871 append_insns (&buildaddr, i, buf);
1872
1873 i = 0;
1874 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1875 buf[i++] = 0xc4;
1876 buf[i++] = 0x04;
1877 buf[i++] = 0x17; /* pop %ss */
1878 buf[i++] = 0x0f; /* pop %gs */
1879 buf[i++] = 0xa9;
1880 buf[i++] = 0x0f; /* pop %fs */
1881 buf[i++] = 0xa1;
1882 buf[i++] = 0x07; /* pop %es */
405f8e94 1883 buf[i++] = 0x1f; /* pop %ds */
fa593d66
PA
1884 buf[i++] = 0x9d; /* popf */
1885 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1886 buf[i++] = 0xc4;
1887 buf[i++] = 0x04;
1888 buf[i++] = 0x61; /* popad */
1889 append_insns (&buildaddr, i, buf);
1890
1891 /* Now, adjust the original instruction to execute in the jump
1892 pad. */
1893 *adjusted_insn_addr = buildaddr;
1894 relocate_instruction (&buildaddr, tpaddr);
1895 *adjusted_insn_addr_end = buildaddr;
1896
1897 /* Write the jump back to the program. */
1898 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1899 memcpy (buf, jump_insn, sizeof (jump_insn));
1900 memcpy (buf + 1, &offset, 4);
1901 append_insns (&buildaddr, sizeof (jump_insn), buf);
1902
1903 /* The jump pad is now built. Wire in a jump to our jump pad. This
1904 is always done last (by our caller actually), so that we can
1905 install fast tracepoints with threads running. This relies on
1906 the agent's atomic write support. */
405f8e94
SS
1907 if (orig_size == 4)
1908 {
1909 /* Create a trampoline. */
1910 *trampoline_size = sizeof (jump_insn);
1911 if (!claim_trampoline_space (*trampoline_size, trampoline))
1912 {
1913 /* No trampoline space available. */
1914 strcpy (err,
1915 "E.Cannot allocate trampoline space needed for fast "
1916 "tracepoints on 4-byte instructions.");
1917 return 1;
1918 }
1919
1920 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1921 memcpy (buf, jump_insn, sizeof (jump_insn));
1922 memcpy (buf + 1, &offset, 4);
1923 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1924
1925 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1926 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1927 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1928 memcpy (buf + 2, &offset, 2);
1929 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1930 *jjump_pad_insn_size = sizeof (small_jump_insn);
1931 }
1932 else
1933 {
1934 /* Else use a 32-bit relative jump instruction. */
1935 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1936 memcpy (buf, jump_insn, sizeof (jump_insn));
1937 memcpy (buf + 1, &offset, 4);
1938 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1939 *jjump_pad_insn_size = sizeof (jump_insn);
1940 }
fa593d66
PA
1941
1942 /* Return the end address of our pad. */
1943 *jump_entry = buildaddr;
1944
1945 return 0;
1946}
1947
1948static int
1949x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1950 CORE_ADDR collector,
1951 CORE_ADDR lockaddr,
1952 ULONGEST orig_size,
1953 CORE_ADDR *jump_entry,
405f8e94
SS
1954 CORE_ADDR *trampoline,
1955 ULONGEST *trampoline_size,
fa593d66
PA
1956 unsigned char *jjump_pad_insn,
1957 ULONGEST *jjump_pad_insn_size,
1958 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
1959 CORE_ADDR *adjusted_insn_addr_end,
1960 char *err)
fa593d66
PA
1961{
1962#ifdef __x86_64__
3aee8918 1963 if (is_64bit_tdesc ())
fa593d66
PA
1964 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1965 collector, lockaddr,
1966 orig_size, jump_entry,
405f8e94 1967 trampoline, trampoline_size,
fa593d66
PA
1968 jjump_pad_insn,
1969 jjump_pad_insn_size,
1970 adjusted_insn_addr,
405f8e94
SS
1971 adjusted_insn_addr_end,
1972 err);
fa593d66
PA
1973#endif
1974
1975 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1976 collector, lockaddr,
1977 orig_size, jump_entry,
405f8e94 1978 trampoline, trampoline_size,
fa593d66
PA
1979 jjump_pad_insn,
1980 jjump_pad_insn_size,
1981 adjusted_insn_addr,
405f8e94
SS
1982 adjusted_insn_addr_end,
1983 err);
1984}
1985
1986/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1987 architectures. */
1988
1989static int
1990x86_get_min_fast_tracepoint_insn_len (void)
1991{
1992 static int warned_about_fast_tracepoints = 0;
1993
1994#ifdef __x86_64__
1995 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1996 used for fast tracepoints. */
3aee8918 1997 if (is_64bit_tdesc ())
405f8e94
SS
1998 return 5;
1999#endif
2000
58b4daa5 2001 if (agent_loaded_p ())
405f8e94
SS
2002 {
2003 char errbuf[IPA_BUFSIZ];
2004
2005 errbuf[0] = '\0';
2006
2007 /* On x86, if trampolines are available, then 4-byte jump instructions
2008 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2009 with a 4-byte offset are used instead. */
2010 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2011 return 4;
2012 else
2013 {
2014 /* GDB has no channel to explain to user why a shorter fast
2015 tracepoint is not possible, but at least make GDBserver
2016 mention that something has gone awry. */
2017 if (!warned_about_fast_tracepoints)
2018 {
2019 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2020 warned_about_fast_tracepoints = 1;
2021 }
2022 return 5;
2023 }
2024 }
2025 else
2026 {
2027 /* Indicate that the minimum length is currently unknown since the IPA
2028 has not loaded yet. */
2029 return 0;
2030 }
fa593d66
PA
2031}
2032
6a271cae
PA
2033static void
2034add_insns (unsigned char *start, int len)
2035{
2036 CORE_ADDR buildaddr = current_insn_ptr;
2037
2038 if (debug_threads)
87ce2a04
DE
2039 debug_printf ("Adding %d bytes of insn at %s\n",
2040 len, paddress (buildaddr));
6a271cae
PA
2041
2042 append_insns (&buildaddr, len, start);
2043 current_insn_ptr = buildaddr;
2044}
2045
6a271cae
PA
2046/* Our general strategy for emitting code is to avoid specifying raw
2047 bytes whenever possible, and instead copy a block of inline asm
2048 that is embedded in the function. This is a little messy, because
2049 we need to keep the compiler from discarding what looks like dead
2050 code, plus suppress various warnings. */
2051
9e4344e5
PA
/* Copy the bytes of the inline-asm snippet INSNS (bracketed by the
   start_NAME/end_NAME labels) into the jump pad.  The leading jmp
   keeps the snippet itself from executing in place.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same, but the snippet is assembled as 32-bit code (for ia32
   inferiors debugged by a 64-bit GDBserver).  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2082#endif
2083
2084#ifdef __x86_64__
2085
2086static void
2087amd64_emit_prologue (void)
2088{
2089 EMIT_ASM (amd64_prologue,
2090 "pushq %rbp\n\t"
2091 "movq %rsp,%rbp\n\t"
2092 "sub $0x20,%rsp\n\t"
2093 "movq %rdi,-8(%rbp)\n\t"
2094 "movq %rsi,-16(%rbp)");
2095}
2096
2097
2098static void
2099amd64_emit_epilogue (void)
2100{
2101 EMIT_ASM (amd64_epilogue,
2102 "movq -16(%rbp),%rdi\n\t"
2103 "movq %rax,(%rdi)\n\t"
2104 "xor %rax,%rax\n\t"
2105 "leave\n\t"
2106 "ret");
2107}
2108
2109static void
2110amd64_emit_add (void)
2111{
2112 EMIT_ASM (amd64_add,
2113 "add (%rsp),%rax\n\t"
2114 "lea 0x8(%rsp),%rsp");
2115}
2116
2117static void
2118amd64_emit_sub (void)
2119{
2120 EMIT_ASM (amd64_sub,
2121 "sub %rax,(%rsp)\n\t"
2122 "pop %rax");
2123}
2124
2125static void
2126amd64_emit_mul (void)
2127{
2128 emit_error = 1;
2129}
2130
2131static void
2132amd64_emit_lsh (void)
2133{
2134 emit_error = 1;
2135}
2136
2137static void
2138amd64_emit_rsh_signed (void)
2139{
2140 emit_error = 1;
2141}
2142
2143static void
2144amd64_emit_rsh_unsigned (void)
2145{
2146 emit_error = 1;
2147}
2148
2149static void
2150amd64_emit_ext (int arg)
2151{
2152 switch (arg)
2153 {
2154 case 8:
2155 EMIT_ASM (amd64_ext_8,
2156 "cbtw\n\t"
2157 "cwtl\n\t"
2158 "cltq");
2159 break;
2160 case 16:
2161 EMIT_ASM (amd64_ext_16,
2162 "cwtl\n\t"
2163 "cltq");
2164 break;
2165 case 32:
2166 EMIT_ASM (amd64_ext_32,
2167 "cltq");
2168 break;
2169 default:
2170 emit_error = 1;
2171 }
2172}
2173
2174static void
2175amd64_emit_log_not (void)
2176{
2177 EMIT_ASM (amd64_log_not,
2178 "test %rax,%rax\n\t"
2179 "sete %cl\n\t"
2180 "movzbq %cl,%rax");
2181}
2182
2183static void
2184amd64_emit_bit_and (void)
2185{
2186 EMIT_ASM (amd64_and,
2187 "and (%rsp),%rax\n\t"
2188 "lea 0x8(%rsp),%rsp");
2189}
2190
2191static void
2192amd64_emit_bit_or (void)
2193{
2194 EMIT_ASM (amd64_or,
2195 "or (%rsp),%rax\n\t"
2196 "lea 0x8(%rsp),%rsp");
2197}
2198
2199static void
2200amd64_emit_bit_xor (void)
2201{
2202 EMIT_ASM (amd64_xor,
2203 "xor (%rsp),%rax\n\t"
2204 "lea 0x8(%rsp),%rsp");
2205}
2206
2207static void
2208amd64_emit_bit_not (void)
2209{
2210 EMIT_ASM (amd64_bit_not,
2211 "xorq $0xffffffffffffffff,%rax");
2212}
2213
2214static void
2215amd64_emit_equal (void)
2216{
2217 EMIT_ASM (amd64_equal,
2218 "cmp %rax,(%rsp)\n\t"
2219 "je .Lamd64_equal_true\n\t"
2220 "xor %rax,%rax\n\t"
2221 "jmp .Lamd64_equal_end\n\t"
2222 ".Lamd64_equal_true:\n\t"
2223 "mov $0x1,%rax\n\t"
2224 ".Lamd64_equal_end:\n\t"
2225 "lea 0x8(%rsp),%rsp");
2226}
2227
/* Pop two values; push 1 if the second-from-top is signed-less-than
   the top, else 0.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2241
/* Pop two values; push 1 if the second-from-top is unsigned-below
   the top (jb), else 0.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2255
/* Dereference the pointer in %rax, replacing it with the SIZE-byte
   value it points at.  Sub-register moves leave the upper bits of
   %rax unchanged; callers follow up with zero/sign extension.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2279
/* Pop the top of stack and branch if it is non-zero.  The jne is
   emitted as raw bytes (0x0f 0x85 + placeholder rel32) so the
   4-byte displacement can be patched later; *OFFSET_P is the byte
   offset of that displacement within the sequence and *SIZE_P its
   width.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
2293
/* Unconditional jump, emitted as raw 0xe9 + placeholder rel32 to be
   patched later via amd64_write_goto_address.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2304
2305static void
2306amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2307{
2308 int diff = (to - (from + size));
2309 unsigned char buf[sizeof (int)];
2310
2311 if (size != 4)
2312 {
2313 emit_error = 1;
2314 return;
2315 }
2316
2317 memcpy (buf, &diff, sizeof (int));
2318 write_inferior_memory (from, buf, sizeof (int));
2319}
2320
2321static void
4e29fb54 2322amd64_emit_const (LONGEST num)
6a271cae
PA
2323{
2324 unsigned char buf[16];
2325 int i;
2326 CORE_ADDR buildaddr = current_insn_ptr;
2327
2328 i = 0;
2329 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 2330 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
2331 i += 8;
2332 append_insns (&buildaddr, i, buf);
2333 current_insn_ptr = buildaddr;
2334}
2335
2336static void
2337amd64_emit_call (CORE_ADDR fn)
2338{
2339 unsigned char buf[16];
2340 int i;
2341 CORE_ADDR buildaddr;
4e29fb54 2342 LONGEST offset64;
6a271cae
PA
2343
2344 /* The destination function being in the shared library, may be
2345 >31-bits away off the compiled code pad. */
2346
2347 buildaddr = current_insn_ptr;
2348
2349 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2350
2351 i = 0;
2352
2353 if (offset64 > INT_MAX || offset64 < INT_MIN)
2354 {
2355 /* Offset is too large for a call. Use callq, but that requires
2356 a register, so avoid it if possible. Use r10, since it is
2357 call-clobbered, we don't have to push/pop it. */
2358 buf[i++] = 0x48; /* mov $fn,%r10 */
2359 buf[i++] = 0xba;
2360 memcpy (buf + i, &fn, 8);
2361 i += 8;
2362 buf[i++] = 0xff; /* callq *%r10 */
2363 buf[i++] = 0xd2;
2364 }
2365 else
2366 {
2367 int offset32 = offset64; /* we know we can't overflow here. */
2368 memcpy (buf + i, &offset32, 4);
2369 i += 4;
2370 }
2371
2372 append_insns (&buildaddr, i, buf);
2373 current_insn_ptr = buildaddr;
2374}
2375
/* Push the value of raw register REG: load the register number into
   %esi (second SysV argument; raw_regs is assumed to still be in
   %rdi) and call the get-raw-register helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2393
/* Pop the in-memory value stack into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
2400
/* Spill the cached top of stack (%rax) onto the in-memory stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
2407
/* Zero-extend the low ARG bits of %rax, clearing everything above.
   Unsupported widths set emit_error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* A 32-bit mask doesn't fit a sign-extended imm32 operand, so
	 build it in %rcx first.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
2430
/* Exchange the top two stack entries (%rax and the top of the
   in-memory stack).  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
2439
/* Drop N 8-byte entries from the in-memory stack by emitting
   "lea N*8(%rsp),%rsp".  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2457
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call to FN passing
   the constant ARG1 in %edi (first SysV integer argument); FN's
   result is left in %rax, the top of the value stack.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
2476
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call to FN
   with ARG1 in %edi and the current top of stack in %rsi; the top of
   stack is preserved across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2503
/* Compare-and-branch: if the two top stack entries are equal, pop
   both and take the (patchable) jump; otherwise pop both and fall
   through.  *OFFSET_P/*SIZE_P describe the rel32 field of the raw
   0xe9 jump for later patching.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2523
/* As amd64_emit_eq_goto, but branches when the two entries differ.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2543
/* Branch when second-from-top < top (signed).  */

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2563
/* Branch when second-from-top <= top (signed).  */

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2583
/* Branch when second-from-top > top (signed).  */

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2603
/* Branch when second-from-top >= top (signed).  */

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2624
/* Table of bytecode-compilation operations for 64-bit inferiors;
   entry order must match struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2665
2666#endif /* __x86_64__ */
2667
/* i386 versions of the emit ops.  Values are 64-bit, carried in the
   %ebx (high) / %eax (low) register pair.  The prologue sets up a
   frame and preserves %ebx.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2678
/* Store the 64-bit result (%ebx:%eax) through the value pointer at
   12(%ebp), return 0, and tear down the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2691
/* 64-bit add of the two top stack entries: add/adc pairs the low and
   high halves, then the lea pops the second operand.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2700
/* 64-bit subtract: (second-from-top) - (top), via sub/sbb into the
   in-memory entry, which is then popped into %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2710
static void
i386_emit_mul (void)
{
  /* 64-bit multiply is not implemented on i386; flag an error.  */
  emit_error = 1;
}
2716
static void
i386_emit_lsh (void)
{
  /* Left shift is not implemented on i386; flag an error.  */
  emit_error = 1;
}
2722
static void
i386_emit_rsh_signed (void)
{
  /* Arithmetic right shift is not implemented on i386; flag an error.  */
  emit_error = 1;
}
2728
static void
i386_emit_rsh_unsigned (void)
{
  /* Logical right shift is not implemented on i386; flag an error.  */
  emit_error = 1;
}
2734
/* Sign-extend the low ARG bits of the %ebx:%eax pair: extend within
   %eax, then replicate its sign bit into %ebx via sarl $31.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2762
/* Logical NOT of the 64-bit top of stack: OR the halves together to
   test the whole value, then set %ebx:%eax to 0 or 1.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2773
/* 64-bit bitwise AND of the two top stack entries, half by half.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2782
/* 64-bit bitwise OR of the two top stack entries.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2791
/* 64-bit bitwise XOR of the two top stack entries.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2800
/* 64-bit bitwise complement of the top of stack.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2808
/* Pop two 64-bit values; push 1 if both halves are equal, else 0.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2826
/* Pop two 64-bit values; push 1 if second-from-top < top (signed).
   High halves are compared signed first; equal highs fall through to
   an unsigned-style compare of the low halves via jl on the carry of
   the full 64-bit ordering.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2845
/* Pop two 64-bit values; push 1 if second-from-top < top (unsigned),
   comparing high halves first, then low halves on a tie.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2864
/* Dereference the pointer in %eax, loading a SIZE-byte value; an
   8-byte load fills the %ebx:%eax pair (high word first so %eax is
   still the pointer for the second load).  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2889
/* Pop the 64-bit top of stack and branch if it is non-zero (the OR of
   both halves).  The jne is emitted as raw bytes so its rel32 can be
   patched; *OFFSET_P is where the displacement starts.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2907
/* Unconditional jump with a patchable rel32 (raw 0xe9 encoding).  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2919
2920static void
2921i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2922{
2923 int diff = (to - (from + size));
2924 unsigned char buf[sizeof (int)];
2925
2926 /* We're only doing 4-byte sizes at the moment. */
2927 if (size != 4)
2928 {
2929 emit_error = 1;
2930 return;
2931 }
2932
2933 memcpy (buf, &diff, sizeof (int));
2934 write_inferior_memory (from, buf, sizeof (int));
2935}
2936
2937static void
4e29fb54 2938i386_emit_const (LONGEST num)
6a271cae
PA
2939{
2940 unsigned char buf[16];
b00ad6ff 2941 int i, hi, lo;
6a271cae
PA
2942 CORE_ADDR buildaddr = current_insn_ptr;
2943
2944 i = 0;
2945 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2946 lo = num & 0xffffffff;
2947 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2948 i += 4;
2949 hi = ((num >> 32) & 0xffffffff);
2950 if (hi)
2951 {
2952 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2953 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2954 i += 4;
2955 }
2956 else
2957 {
2958 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2959 }
2960 append_insns (&buildaddr, i, buf);
2961 current_insn_ptr = buildaddr;
2962}
2963
2964static void
2965i386_emit_call (CORE_ADDR fn)
2966{
2967 unsigned char buf[16];
2968 int i, offset;
2969 CORE_ADDR buildaddr;
2970
2971 buildaddr = current_insn_ptr;
2972 i = 0;
2973 buf[i++] = 0xe8; /* call <reladdr> */
2974 offset = ((int) fn) - (buildaddr + 5);
2975 memcpy (buf + 1, &offset, 4);
2976 append_insns (&buildaddr, 5, buf);
2977 current_insn_ptr = buildaddr;
2978}
2979
/* Push the value of raw register REG: pass the regs base (from
   8(%ebp)) and the register number on the stack and call the
   get-raw-register helper; clear %ebx since the helper returns a
   32-bit value.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3005
/* Pop the in-memory stack into the %ebx:%eax pair (low half first).  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
3013
/* Spill the cached %ebx:%eax pair onto the in-memory stack (high
   half pushed first so the low half is at the lower address).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
3021
/* Zero-extend the low ARG bits of the %ebx:%eax pair.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
3045
/* Exchange the top two 64-bit stack entries.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
3057
/* Drop N 8-byte entries from the in-memory stack via
   "lea N*8(%esp),%esp".  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* Only small adjustments fit the 8-bit displacement used here.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
3073
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call to FN with the
   constant ARG1 passed on the stack (cdecl); the 64-bit result comes
   back in %edx:%eax and is moved into the %ebx:%eax pair.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3101
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call to FN
   with ARG1 and the current 64-bit top of stack passed on the stack;
   the top of stack is preserved across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
3137
/* Compare-and-branch: if the two top 64-bit entries are equal, pop
   both and take the patchable jump; otherwise pop both and fall
   through.  *OFFSET_P/*SIZE_P locate the rel32 of the raw 0xe9.  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3163
/* Branch when the two top 64-bit entries differ.  */

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3189
/* Branch when second-from-top < top (signed 64-bit, high halves
   compared first).  */

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3215
/* Branch when second-from-top <= top (signed 64-bit).  */

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3241
/* Branch when second-from-top > top (signed 64-bit).  */

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3267
/* Branch when second-from-top >= top (signed 64-bit).  */

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3293
/* Table of bytecode-compilation operations for 32-bit inferiors;
   entry order must match struct emit_ops.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3334
3335
/* Return the emit-ops table matching the current inferior's word
   size (amd64 when the tdesc is 64-bit, i386 otherwise).  */

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3346
/* The x86 target always supports range stepping.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3352
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.
   Entry order must match struct linux_target_ops (linux-low.h).  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL, /* breakpoint_reinsert_addr -- presumably; confirm against linux-low.h */
  1, /* decr_pc_after_break -- presumably; confirm against linux-low.h */
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
/* One-time arch initialization: register all x86 target descriptions
   and build the no-XML fallback descriptions for clients that cannot
   fetch XML target descriptions.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();

  /* Fallback description for GDBs that don't support XML.  */
  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_mpx_linux ();

  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.651243 seconds and 4 git commands to generate.